author     Marcel Ziswiler <marcel.ziswiler@toradex.com>  2015-05-12 08:12:30 +0200
committer  Marcel Ziswiler <marcel.ziswiler@toradex.com>  2015-05-12 08:12:30 +0200
commit     f1209c0da26fc54d8c567a039d7765b6ad23bd5b (patch)
tree       2d80c67c8e2b22ce5ebee0f617c3c71b6763c8f2 /drivers
parent     ff73aabd871910b44bea051e41c5024584b1061b (diff)
parent     834b98d0367931d6bec5a48fe87fddd9b877f688 (diff)
Merge branch 'tegra-nand-next' into tegra-next
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/mtd/devices/block2mtd.c | 1
-rw-r--r--  drivers/mtd/devices/doc2000.c | 2
-rw-r--r--  drivers/mtd/devices/doc2001.c | 2
-rw-r--r--  drivers/mtd/devices/doc2001plus.c | 2
-rw-r--r--  drivers/mtd/devices/lart.c | 1
-rw-r--r--  drivers/mtd/devices/m25p80.c | 1
-rw-r--r--  drivers/mtd/devices/sst25l.c | 1
-rw-r--r--  drivers/mtd/devices/tegra_nand.c | 16
-rw-r--r--  drivers/mtd/devices/tegra_nand.h | 7
-rw-r--r--  drivers/mtd/mtdpart.c | 12
-rw-r--r--  drivers/mtd/nand/nandsim.c | 5
-rw-r--r--  drivers/mtd/ubi/Kconfig | 69
-rw-r--r--  drivers/mtd/ubi/Makefile | 6
-rw-r--r--  drivers/mtd/ubi/attach.c (renamed from drivers/mtd/ubi/scan.c) | 1247
-rw-r--r--  drivers/mtd/ubi/build.c | 400
-rw-r--r--  drivers/mtd/ubi/cdev.c | 59
-rw-r--r--  drivers/mtd/ubi/debug.c | 300
-rw-r--r--  drivers/mtd/ubi/debug.h | 162
-rw-r--r--  drivers/mtd/ubi/eba.c | 257
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 1537
-rw-r--r--  drivers/mtd/ubi/gluebi.c | 60
-rw-r--r--  drivers/mtd/ubi/io.c | 313
-rw-r--r--  drivers/mtd/ubi/kapi.c | 61
-rw-r--r--  drivers/mtd/ubi/misc.c | 39
-rw-r--r--  drivers/mtd/ubi/scan.h | 174
-rw-r--r--  drivers/mtd/ubi/ubi-media.h | 145
-rw-r--r--  drivers/mtd/ubi/ubi.h | 362
-rw-r--r--  drivers/mtd/ubi/upd.c | 22
-rw-r--r--  drivers/mtd/ubi/vmt.c | 86
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 246
-rw-r--r--  drivers/mtd/ubi/wl.c | 892
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 4
32 files changed, 4476 insertions, 2015 deletions
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index b78f23169d4e..8cd983cdc643 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -284,6 +284,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
+ dev->mtd.writebufsize = PAGE_SIZE;
dev->mtd.type = MTD_RAM;
dev->mtd.flags = MTD_CAP_RAM;
dev->mtd.erase = block2mtd_erase;
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index f7fbf6025ef2..b249e1300c0c 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -567,7 +567,7 @@ void DoC2k_init(struct mtd_info *mtd)
mtd->flags = MTD_CAP_NANDFLASH;
mtd->size = 0;
mtd->erasesize = 0;
- mtd->writesize = 512;
+ mtd->writebufsize = mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 241192f05bc8..236af0f01002 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -349,7 +349,7 @@ void DoCMil_init(struct mtd_info *mtd)
/* FIXME: erase size is not always 8KiB */
mtd->erasesize = 0x2000;
- mtd->writesize = 512;
+ mtd->writebufsize = mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 09ae0adc3ad0..fed491f4b6f8 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -471,7 +471,7 @@ void DoCMilPlus_init(struct mtd_info *mtd)
mtd->size = 0;
mtd->erasesize = 0;
- mtd->writesize = 512;
+ mtd->writebufsize = mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 772a0ff89e0f..09d5b5aaea57 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -636,6 +636,7 @@ static int __init lart_flash_init (void)
mtd.name = module_name;
mtd.type = MTD_NORFLASH;
mtd.writesize = 1;
+ mtd.writebufsize = 4;
mtd.flags = MTD_CAP_NORFLASH;
mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 35180e475c4c..9fad104d4aab 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -930,6 +930,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.dev.parent = &spi->dev;
flash->page_size = info->page_size;
+ flash->mtd.writebufsize = flash->page_size;
if (info->addr_width)
flash->addr_width = info->addr_width;
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 83e80c65d6e7..ea22ae366ff1 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -406,6 +406,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
flash->mtd.flags = MTD_CAP_NORFLASH;
flash->mtd.erasesize = flash_info->erase_size;
flash->mtd.writesize = flash_info->page_size;
+ flash->mtd.writebufsize = flash_info->page_size;
flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
flash->mtd.erase = sst25l_erase;
flash->mtd.read = sst25l_read;
diff --git a/drivers/mtd/devices/tegra_nand.c b/drivers/mtd/devices/tegra_nand.c
index 6034f0b2f239..0a9edfdb8ba2 100644
--- a/drivers/mtd/devices/tegra_nand.c
+++ b/drivers/mtd/devices/tegra_nand.c
@@ -121,6 +121,7 @@ struct tegra_nand_info {
uint32_t command_reg;
uint32_t config_reg;
uint32_t dmactrl_reg;
+ uint32_t bch_config_reg;
struct completion cmd_complete;
struct completion dma_complete;
@@ -233,6 +234,7 @@ static struct {
REG_NAME(DEC_STATUS_REG),
REG_NAME(HWSTATUS_CMD_REG),
REG_NAME(HWSTATUS_MASK_REG),
+ REG_NAME(BCH_CONFIG_REG),
{0, NULL},
};
@@ -392,6 +394,7 @@ static void tegra_nand_prep_readid(struct tegra_nand_info *info)
writel(0, ADDR_REG1);
writel(0, ADDR_REG2);
writel(0, CONFIG_REG);
+ writel(0, BCH_CONFIG_REG);
}
static int
@@ -451,6 +454,7 @@ static int tegra_nand_cmd_reset(struct tegra_nand_info *info,
writel(0, ADDR_REG1);
writel(0, ADDR_REG2);
writel(0, CONFIG_REG);
+ writel(0, BCH_CONFIG_REG);
err = tegra_nand_go(info);
if (err != 0)
@@ -789,6 +793,7 @@ static inline void clear_regs(struct tegra_nand_info *info)
info->command_reg = 0;
info->config_reg = 0;
info->dmactrl_reg = 0;
+ info->bch_config_reg = 0;
}
static void
@@ -820,7 +825,7 @@ prep_transfer_dma(struct tegra_nand_info *info, int rx, int do_ecc,
if (rx) {
if (do_ecc)
- info->config_reg |= CONFIG_HW_ERR_CORRECTION;
+ info->bch_config_reg |= (BCH_CONFIG_BCH_TVALUE(3) | BCH_CONFIG_BCH_ECC);
info->command_reg |= COMMAND_RX;
info->dmactrl_reg |= DMA_CTRL_REUSE_BUFFER;
writel(NAND_CMD_READ0, CMD_REG1);
@@ -833,10 +838,8 @@ prep_transfer_dma(struct tegra_nand_info *info, int rx, int do_ecc,
}
if (data_len) {
- if (do_ecc)
- info->config_reg |= CONFIG_HW_ECC | CONFIG_ECC_SEL;
info->config_reg |=
- CONFIG_PAGE_SIZE_SEL(page_size_sel) | CONFIG_TVALUE(0) |
+ CONFIG_PAGE_SIZE_SEL(page_size_sel) |
CONFIG_SKIP_SPARE | CONFIG_SKIP_SPARE_SEL(0);
info->command_reg |= COMMAND_A_VALID;
info->dmactrl_reg |= DMA_CTRL_DMA_EN_A;
@@ -857,7 +860,6 @@ prep_transfer_dma(struct tegra_nand_info *info, int rx, int do_ecc,
tag_sz += 4; /* size of tag ecc */
if (rx)
oob_len += 4; /* size of tag ecc */
- info->config_reg |= CONFIG_ECC_EN_TAG;
}
if (data_len && rx)
oob_len += 4; /* num of skipped bytes */
@@ -1039,6 +1041,7 @@ do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
datbuf_dma_addr, a_len, info->oob_dma_addr,
b_len);
writel(info->config_reg, CONFIG_REG);
+ writel(info->bch_config_reg, BCH_CONFIG_REG);
writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
INIT_COMPLETION(info->dma_complete);
@@ -1242,6 +1245,7 @@ do_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
b_len);
writel(info->config_reg, CONFIG_REG);
+ writel(info->bch_config_reg, BCH_CONFIG_REG);
writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
INIT_COMPLETION(info->dma_complete);
@@ -1374,6 +1378,7 @@ static void tegra_nand_resume(struct mtd_info *mtd)
IER_ECC_ERR | IER_GIE);
writel(0, CONFIG_REG);
+ writel(0, BCH_CONFIG_REG);
set_chip_timing(info, info->vendor_id,
info->device_id, info->dev_parms);
@@ -1427,6 +1432,7 @@ static int tegra_nand_scan(struct mtd_info *mtd, int maxchips)
writel(SCAN_TIMING_VAL, TIMING_REG);
writel(SCAN_TIMING2_VAL, TIMING2_REG);
writel(0, CONFIG_REG);
+ writel(0, BCH_CONFIG_REG);
select_chip(info, 0);
diff --git a/drivers/mtd/devices/tegra_nand.h b/drivers/mtd/devices/tegra_nand.h
index 339d6cc7330c..64d66381f8b1 100644
--- a/drivers/mtd/devices/tegra_nand.h
+++ b/drivers/mtd/devices/tegra_nand.h
@@ -56,6 +56,9 @@
#define LL_CONFIG_REG (TEGRA_NAND_BASE + 0x58)
#define LL_PTR_REG (TEGRA_NAND_BASE + 0x5c)
#define LL_STATUS_REG (TEGRA_NAND_BASE + 0x60)
+#define BCH_CONFIG_REG (TEGRA_NAND_BASE + 0xcc)
+#define BCH_DEC_RESULT_REG (TEGRA_NAND_BASE + 0xd0)
+#define BCH_DEC_STATUS_BUF_REG (TEGRA_NAND_BASE + 0xd4)
/* nand_command bits */
#define COMMAND_GO REG_BIT(31)
@@ -144,5 +147,9 @@
#define HWSTATUS_RBSY_MASK(val) REG_FIELD((val), 8, 8)
#define HWSTATUS_RBSY_EXP_VAL(val) REG_FIELD((val), 0, 8)
+/* nand bch config bits */
+#define BCH_CONFIG_BCH_TVALUE(val) REG_FIELD((val), 4, 2)
+#define BCH_CONFIG_BCH_ECC REG_BIT(0)
+
#endif
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 630be3e7da04..dd8cfa903b79 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -735,7 +735,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
}
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
-int mtd_is_partition(struct mtd_info *mtd)
+int mtd_is_partition(const struct mtd_info *mtd)
{
struct mtd_part *part;
int ispart = 0;
@@ -751,3 +751,13 @@ int mtd_is_partition(struct mtd_info *mtd)
return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
+
+/* Returns the size of the entire flash chip */
+uint64_t mtd_get_device_size(const struct mtd_info *mtd)
+{
+ if (!mtd_is_partition(mtd))
+ return mtd->size;
+
+ return PART(mtd)->master->size;
+}
+EXPORT_SYMBOL_GPL(mtd_get_device_size);
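The mtd_get_device_size() helper added above returns the size of the master device when the mtd_info describes a partition, and mtd->size otherwise. A minimal caller sketch (illustration only, not part of this patch; the function name, parameter and headers below are assumptions) of how a layer such as UBI could size a bad-eraseblock reserve from the whole chip rather than from the attached partition:

#include <linux/math64.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Illustration only: scale the bad-PEB reserve by the whole chip, not the partition. */
static int example_bad_peb_limit(struct mtd_info *mtd, int max_beb_per1024)
{
	/* Number of physical eraseblocks on the entire chip. */
	uint64_t device_pebs = div_u64(mtd_get_device_size(mtd), mtd->erasesize);

	/* With the default limit of 20 this reserves 20/1024, about 1.9% of the chip. */
	return device_pebs * max_beb_per1024 / 1024;
}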
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 357e8c5252a8..84ee1a36eb2c 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1408,10 +1408,7 @@ int do_read_error(struct nandsim *ns, int num)
unsigned int page_no = ns->regs.row;
if (read_error(page_no)) {
- int i;
- memset(ns->buf.byte, 0xFF, num);
- for (i = 0; i < num; ++i)
- ns->buf.byte[i] = random32();
+ prandom_bytes(ns->buf.byte, num);
NS_WARN("simulating read error in page %u\n", page_no);
return 1;
}
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 4dcc752a0c0b..36663af56d89 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -27,20 +27,55 @@ config MTD_UBI_WL_THRESHOLD
life-cycle less than 10000, the threshold should be lessened (e.g.,
to 128 or 256, although it does not have to be power of 2).
-config MTD_UBI_BEB_RESERVE
- int "Percentage of reserved eraseblocks for bad eraseblocks handling"
- default 1
- range 0 25
+config MTD_UBI_BEB_LIMIT
+ int "Maximum expected bad eraseblock count per 1024 eraseblocks"
+ default 20
+ range 0 768
help
- If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
- reserves some amount of physical eraseblocks to handle new bad
- eraseblocks. For example, if a flash physical eraseblock becomes bad,
- UBI uses these reserved physical eraseblocks to relocate the bad one.
- This option specifies how many physical eraseblocks will be reserved
- for bad eraseblock handling (percents of total number of good flash
- eraseblocks). If the underlying flash does not admit of bad
- eraseblocks (e.g. NOR flash), this value is ignored and nothing is
- reserved. Leave the default value if unsure.
+ This option specifies the maximum bad physical eraseblocks UBI
+ expects on the MTD device (per 1024 eraseblocks). If the underlying
+ flash does not admit of bad eraseblocks (e.g. NOR flash), this value
+ is ignored.
+
+ NAND datasheets often specify the minimum and maximum NVM (Number of
+ Valid Blocks) for the flashes' endurance lifetime. The maximum
+ expected bad eraseblocks per 1024 eraseblocks then can be calculated
+ as "1024 * (1 - MinNVB / MaxNVB)", which gives 20 for most NANDs
+ (MaxNVB is basically the total count of eraseblocks on the chip).
+
+ To put it differently, if this value is 20, UBI will try to reserve
+ about 1.9% of physical eraseblocks for bad blocks handling. And that
+ will be 1.9% of eraseblocks on the entire NAND chip, not just the MTD
+ partition UBI attaches. This means that if you have, say, a NAND
+ flash chip which admits a maximum of 40 bad eraseblocks, and it is split
+ into two MTD partitions of the same size, UBI will reserve 40 eraseblocks
+ when attaching a partition.
+
+ This option can be overridden by the "mtd=" UBI module parameter or
+ by the "attach" ioctl.
+
+ Leave the default value if unsure.
+
+config MTD_UBI_FASTMAP
+ bool "UBI Fastmap (Experimental feature)"
+ default n
+ help
+ Important: this feature is experimental so far and the on-flash
+ format for fastmap may change in the next kernel versions
+
+ Fastmap is a mechanism which allows attaching an UBI device
+ in nearly constant time. Instead of scanning the whole MTD device it
+ only has to locate a checkpoint (called fastmap) on the device.
+ The on-flash fastmap contains all information needed to attach
+ the device. Using fastmap only makes sense on large devices where
+ attaching by scanning takes long. UBI will not automatically install
+ a fastmap on old images, but you can set the UBI module parameter
+ fm_autoconvert to 1 if you want so. Please note that fastmap-enabled
+ images are still usable with UBI implementations without
+ fastmap support. On typical flash devices the whole fastmap fits
+ into one PEB. UBI will reserve PEBs to hold two fastmaps.
+
+ If in doubt, say "N".
config MTD_UBI_GLUEBI
tristate "MTD devices emulation driver (gluebi)"
@@ -52,12 +87,4 @@ config MTD_UBI_GLUEBI
work on top of UBI. Do not enable this unless you use legacy
software.
-config MTD_UBI_DEBUG
- bool "UBI debugging"
- depends on SYSFS
- select DEBUG_FS
- select KALLSYMS
- help
- This option enables UBI debugging.
-
endif # MTD_UBI
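To make the MTD_UBI_BEB_LIMIT arithmetic above concrete, here is a short worked example with assumed datasheet numbers (illustration only, not part of the patch):

/* Worked example with hypothetical datasheet numbers (not part of this patch). */
int max_nvb   = 2048;                                  /* total eraseblocks on the chip */
int min_nvb   = 2008;                                  /* guaranteed valid blocks       */
int beb_limit = 1024 * (max_nvb - min_nvb) / max_nvb;  /* = 20 per 1024 eraseblocks     */
int reserved  = max_nvb * beb_limit / 1024;            /* = 40 PEBs reserved chip-wide  */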
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index c9302a5452b0..b46b0c978581 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_MTD_UBI) += ubi.o
-ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o
-ubi-y += misc.o
+ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
+ubi-y += misc.o debug.o
+ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
-ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/attach.c
index a3a198f9b98d..5da50606affd 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/attach.c
@@ -19,21 +19,21 @@
*/
/*
- * UBI scanning sub-system.
+ * UBI attaching sub-system.
*
- * This sub-system is responsible for scanning the flash media, checking UBI
- * headers and providing complete information about the UBI flash image.
+ * This sub-system is responsible for attaching MTD devices and it also
+ * implements flash media scanning.
*
- * The scanning information is represented by a &struct ubi_scan_info' object.
- * Information about found volumes is represented by &struct ubi_scan_volume
+ * The attaching information is represented by a &struct ubi_attach_info'
+ * object. Information about volumes is represented by &struct ubi_ainf_volume
* objects which are kept in volume RB-tree with root at the @volumes field.
* The RB-tree is indexed by the volume ID.
*
- * Scanned logical eraseblocks are represented by &struct ubi_scan_leb objects.
- * These objects are kept in per-volume RB-trees with the root at the
- * corresponding &struct ubi_scan_volume object. To put it differently, we keep
- * an RB-tree of per-volume objects and each of these objects is the root of
- * RB-tree of per-eraseblock objects.
+ * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
+ * objects are kept in per-volume RB-trees with the root at the corresponding
+ * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
+ * per-volume objects and each of these objects is the root of RB-tree of
+ * per-LEB objects.
*
* Corrupted physical eraseblocks are put to the @corr list, free physical
* eraseblocks are put to the @free list and the physical eraseblock to be
@@ -51,34 +51,35 @@
*
* 1. Corruptions caused by power cuts. These are expected corruptions and UBI
* tries to handle them gracefully, without printing too many warnings and
- * error messages. The idea is that we do not lose important data in these case
- * - we may lose only the data which was being written to the media just before
- * the power cut happened, and the upper layers (e.g., UBIFS) are supposed to
- * handle such data losses (e.g., by using the FS journal).
+ * error messages. The idea is that we do not lose important data in these
+ * cases - we may lose only the data which were being written to the media just
+ * before the power cut happened, and the upper layers (e.g., UBIFS) are
+ * supposed to handle such data losses (e.g., by using the FS journal).
*
* When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
* the reason is a power cut, UBI puts this PEB to the @erase list, and all
* PEBs in the @erase list are scheduled for erasure later.
*
* 2. Unexpected corruptions which are not caused by power cuts. During
- * scanning, such PEBs are put to the @corr list and UBI preserves them.
+ * attaching, such PEBs are put to the @corr list and UBI preserves them.
* Obviously, this lessens the amount of available PEBs, and if at some point
* UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
* about such PEBs every time the MTD device is attached.
*
* However, it is difficult to reliably distinguish between these types of
- * corruptions and UBI's strategy is as follows. UBI assumes corruption type 2
- * if the VID header is corrupted and the data area does not contain all 0xFFs,
- * and there were no bit-flips or integrity errors while reading the data area.
- * Otherwise UBI assumes corruption type 1. So the decision criteria are as
- * follows.
- * o If the data area contains only 0xFFs, there is no data, and it is safe
+ * corruptions and UBI's strategy is as follows (in case of attaching by
+ * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
+ * the data area does not contain all 0xFFs, and there were no bit-flips or
+ * integrity errors (e.g., ECC errors in case of NAND) while reading the data
+ * area. Otherwise UBI assumes corruption type 1. So the decision criteria
+ * are as follows.
+ * o If the data area contains only 0xFFs, there are no data, and it is safe
* to just erase this PEB - this is corruption type 1.
* o If the data area has bit-flips or data integrity errors (ECC errors on
* NAND), it is probably a PEB which was being erased when power cut
* happened, so this is corruption type 1. However, this is just a guess,
* which might be wrong.
- * o Otherwise this it corruption type 2.
+ * o Otherwise this is corruption type 2.
*/
#include <linux/err.h>
@@ -88,11 +89,7 @@
#include <linux/random.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
-#else
-#define paranoid_check_si(ubi, si) 0
-#endif
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
/* Temporary variables used during scanning */
static struct ubi_ec_hdr *ech;
@@ -100,13 +97,18 @@ static struct ubi_vid_hdr *vidh;
/**
* add_to_list - add physical eraseblock to a list.
- * @si: scanning information
+ * @ai: attaching information
* @pnum: physical eraseblock number to add
+ * @vol_id: the last used volume id for the PEB
+ * @lnum: the last used LEB number for the PEB
* @ec: erase counter of the physical eraseblock
* @to_head: if not zero, add to the head of the list
* @list: the list to add to
*
- * This function adds physical eraseblock @pnum to free, erase, or alien lists.
+ * This function allocates a 'struct ubi_ainf_peb' object for physical
+ * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
+ * It stores the @lnum and @vol_id alongside, which can both be
+ * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
* If @to_head is not zero, PEB will be added to the head of the list, which
* basically means it will be processed first later. E.g., we add corrupted
* PEBs (corrupted due to power cuts) to the head of the erase list to make
@@ -114,65 +116,68 @@ static struct ubi_vid_hdr *vidh;
* returns zero in case of success and a negative error code in case of
* failure.
*/
-static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head,
- struct list_head *list)
+static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
+ int lnum, int ec, int to_head, struct list_head *list)
{
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
- if (list == &si->free) {
+ if (list == &ai->free) {
dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
- } else if (list == &si->erase) {
+ } else if (list == &ai->erase) {
dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
- } else if (list == &si->alien) {
+ } else if (list == &ai->alien) {
dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
- si->alien_peb_count += 1;
+ ai->alien_peb_count += 1;
} else
BUG();
- seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
- if (!seb)
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
return -ENOMEM;
- seb->pnum = pnum;
- seb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->ec = ec;
if (to_head)
- list_add(&seb->u.list, list);
+ list_add(&aeb->u.list, list);
else
- list_add_tail(&seb->u.list, list);
+ list_add_tail(&aeb->u.list, list);
return 0;
}
/**
* add_corrupted - add a corrupted physical eraseblock.
- * @si: scanning information
+ * @ai: attaching information
* @pnum: physical eraseblock number to add
* @ec: erase counter of the physical eraseblock
*
- * This function adds corrupted physical eraseblock @pnum to the 'corr' list.
- * The corruption was presumably not caused by a power cut. Returns zero in
- * case of success and a negative error code in case of failure.
+ * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
+ * physical eraseblock @pnum and adds it to the 'corr' list. The corruption
+ * was presumably not caused by a power cut. Returns zero in case of success
+ * and a negative error code in case of failure.
*/
-static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
+static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
{
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
- seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
- if (!seb)
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
return -ENOMEM;
- si->corr_peb_count += 1;
- seb->pnum = pnum;
- seb->ec = ec;
- list_add(&seb->u.list, &si->corr);
+ ai->corr_peb_count += 1;
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ list_add(&aeb->u.list, &ai->corr);
return 0;
}
/**
* validate_vid_hdr - check volume identifier header.
* @vid_hdr: the volume identifier header to check
- * @sv: information about the volume this logical eraseblock belongs to
+ * @av: information about the volume this logical eraseblock belongs to
* @pnum: physical eraseblock number the VID header came from
*
* This function checks that data stored in @vid_hdr is consistent. Returns
@@ -184,15 +189,15 @@ static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
* headers of the same volume.
*/
static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
- const struct ubi_scan_volume *sv, int pnum)
+ const struct ubi_ainf_volume *av, int pnum)
{
int vol_type = vid_hdr->vol_type;
int vol_id = be32_to_cpu(vid_hdr->vol_id);
int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
int data_pad = be32_to_cpu(vid_hdr->data_pad);
- if (sv->leb_count != 0) {
- int sv_vol_type;
+ if (av->leb_count != 0) {
+ int av_vol_type;
/*
* This is not the first logical eraseblock belonging to this
@@ -200,28 +205,28 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
* to the data in previous logical eraseblock headers.
*/
- if (vol_id != sv->vol_id) {
- dbg_err("inconsistent vol_id");
+ if (vol_id != av->vol_id) {
+ ubi_err("inconsistent vol_id");
goto bad;
}
- if (sv->vol_type == UBI_STATIC_VOLUME)
- sv_vol_type = UBI_VID_STATIC;
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av_vol_type = UBI_VID_STATIC;
else
- sv_vol_type = UBI_VID_DYNAMIC;
+ av_vol_type = UBI_VID_DYNAMIC;
- if (vol_type != sv_vol_type) {
- dbg_err("inconsistent vol_type");
+ if (vol_type != av_vol_type) {
+ ubi_err("inconsistent vol_type");
goto bad;
}
- if (used_ebs != sv->used_ebs) {
- dbg_err("inconsistent used_ebs");
+ if (used_ebs != av->used_ebs) {
+ ubi_err("inconsistent used_ebs");
goto bad;
}
- if (data_pad != sv->data_pad) {
- dbg_err("inconsistent data_pad");
+ if (data_pad != av->data_pad) {
+ ubi_err("inconsistent data_pad");
goto bad;
}
}
@@ -230,74 +235,74 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
bad:
ubi_err("inconsistent VID header at PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_sv(sv);
+ ubi_dump_vid_hdr(vid_hdr);
+ ubi_dump_av(av);
return -EINVAL;
}
/**
- * add_volume - add volume to the scanning information.
- * @si: scanning information
+ * add_volume - add volume to the attaching information.
+ * @ai: attaching information
* @vol_id: ID of the volume to add
* @pnum: physical eraseblock number
* @vid_hdr: volume identifier header
*
* If the volume corresponding to the @vid_hdr logical eraseblock is already
- * present in the scanning information, this function does nothing. Otherwise
- * it adds corresponding volume to the scanning information. Returns a pointer
- * to the scanning volume object in case of success and a negative error code
- * in case of failure.
+ * present in the attaching information, this function does nothing. Otherwise
+ * it adds corresponding volume to the attaching information. Returns a pointer
+ * to the allocated "av" object in case of success and a negative error code in
+ * case of failure.
*/
-static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
- int pnum,
+static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
+ int vol_id, int pnum,
const struct ubi_vid_hdr *vid_hdr)
{
- struct ubi_scan_volume *sv;
- struct rb_node **p = &si->volumes.rb_node, *parent = NULL;
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
/* Walk the volume RB-tree to look if this volume is already present */
while (*p) {
parent = *p;
- sv = rb_entry(parent, struct ubi_scan_volume, rb);
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
- if (vol_id == sv->vol_id)
- return sv;
+ if (vol_id == av->vol_id)
+ return av;
- if (vol_id > sv->vol_id)
+ if (vol_id > av->vol_id)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
/* The volume is absent - add it */
- sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL);
- if (!sv)
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
return ERR_PTR(-ENOMEM);
- sv->highest_lnum = sv->leb_count = 0;
- sv->vol_id = vol_id;
- sv->root = RB_ROOT;
- sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
- sv->data_pad = be32_to_cpu(vid_hdr->data_pad);
- sv->compat = vid_hdr->compat;
- sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
+ av->highest_lnum = av->leb_count = 0;
+ av->vol_id = vol_id;
+ av->root = RB_ROOT;
+ av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ av->data_pad = be32_to_cpu(vid_hdr->data_pad);
+ av->compat = vid_hdr->compat;
+ av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
: UBI_STATIC_VOLUME;
- if (vol_id > si->highest_vol_id)
- si->highest_vol_id = vol_id;
+ if (vol_id > ai->highest_vol_id)
+ ai->highest_vol_id = vol_id;
- rb_link_node(&sv->rb, parent, p);
- rb_insert_color(&sv->rb, &si->volumes);
- si->vols_found += 1;
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+ ai->vols_found += 1;
dbg_bld("added volume %d", vol_id);
- return sv;
+ return av;
}
/**
- * compare_lebs - find out which logical eraseblock is newer.
+ * ubi_compare_lebs - find out which logical eraseblock is newer.
* @ubi: UBI device description object
- * @seb: first logical eraseblock to compare
+ * @aeb: first logical eraseblock to compare
* @pnum: physical eraseblock number of the second logical eraseblock to
* compare
* @vid_hdr: volume identifier header of the second logical eraseblock
@@ -306,7 +311,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
* case of success this function returns a positive value, in case of failure, a
* negative error code is returned. The success return codes use the following
* bits:
- * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
+ * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
* second PEB (described by @pnum and @vid_hdr);
* o bit 0 is set: the second PEB is newer;
* o bit 1 is cleared: no bit-flips were detected in the newer LEB;
@@ -314,16 +319,15 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
* o bit 2 is cleared: the older LEB is not corrupted;
* o bit 2 is set: the older LEB is corrupted.
*/
-static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr)
{
- void *buf;
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
struct ubi_vid_hdr *vh = NULL;
unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
- if (sqnum2 == seb->sqnum) {
+ if (sqnum2 == aeb->sqnum) {
/*
* This must be a really ancient UBI image which has been
* created before sequence numbers support has been added. At
@@ -332,12 +336,12 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
* support these images anymore. Well, those images still work,
* but only if no unclean reboots happened.
*/
- ubi_err("unsupported on-flash UBI format\n");
+ ubi_err("unsupported on-flash UBI format");
return -EINVAL;
}
/* Obviously the LEB with lower sequence counter is older */
- second_is_newer = !!(sqnum2 > seb->sqnum);
+ second_is_newer = (sqnum2 > aeb->sqnum);
/*
* Now we know which copy is newer. If the copy flag of the PEB with
@@ -356,7 +360,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
return 1;
}
} else {
- if (!seb->copy_flag) {
+ if (!aeb->copy_flag) {
/* It is not a copy, so it is newer */
dbg_bld("first PEB %d is newer, copy_flag is unset",
pnum);
@@ -367,14 +371,14 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
if (!vh)
return -ENOMEM;
- pnum = seb->pnum;
+ pnum = aeb->pnum;
err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
if (err) {
if (err == UBI_IO_BITFLIPS)
bitflips = 1;
else {
- dbg_err("VID of PEB %d header is bad, but it "
- "was OK earlier, err %d", pnum, err);
+ ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
+ pnum, err);
if (err > 0)
err = -EIO;
@@ -388,18 +392,14 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
/* Read the data of the copy and check the CRC */
len = be32_to_cpu(vid_hdr->data_size);
- buf = vmalloc(len);
- if (!buf) {
- err = -ENOMEM;
- goto out_free_vidh;
- }
- err = ubi_io_read_data(ubi, buf, pnum, 0, len);
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
- goto out_free_buf;
+ goto out_unlock;
data_crc = be32_to_cpu(vid_hdr->data_crc);
- crc = crc32(UBI_CRC32_INIT, buf, len);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
if (crc != data_crc) {
dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
pnum, crc, data_crc);
@@ -410,8 +410,8 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
dbg_bld("PEB %d CRC is OK", pnum);
bitflips = !!err;
}
+ mutex_unlock(&ubi->buf_mutex);
- vfree(buf);
ubi_free_vid_hdr(ubi, vh);
if (second_is_newer)
@@ -421,17 +421,17 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
return second_is_newer | (bitflips << 1) | (corrupted << 2);
-out_free_buf:
- vfree(buf);
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
ubi_free_vid_hdr(ubi, vh);
return err;
}
/**
- * ubi_scan_add_used - add physical eraseblock to the scanning information.
+ * ubi_add_to_av - add used physical eraseblock to the attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @pnum: the physical eraseblock number
* @ec: erase counter
* @vid_hdr: the volume identifier header
@@ -444,14 +444,13 @@ out_free_vidh:
* to be picked, while the older one has to be dropped. This function returns
* zero in case of success and a negative error code in case of failure.
*/
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
- int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
- int bitflips)
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
int err, vol_id, lnum;
unsigned long long sqnum;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
struct rb_node **p, *parent = NULL;
vol_id = be32_to_cpu(vid_hdr->vol_id);
@@ -461,25 +460,25 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
pnum, vol_id, lnum, ec, sqnum, bitflips);
- sv = add_volume(si, vol_id, pnum, vid_hdr);
- if (IS_ERR(sv))
- return PTR_ERR(sv);
+ av = add_volume(ai, vol_id, pnum, vid_hdr);
+ if (IS_ERR(av))
+ return PTR_ERR(av);
- if (si->max_sqnum < sqnum)
- si->max_sqnum = sqnum;
+ if (ai->max_sqnum < sqnum)
+ ai->max_sqnum = sqnum;
/*
* Walk the RB-tree of logical eraseblocks of volume @vol_id to look
* if this is the first instance of this logical eraseblock or not.
*/
- p = &sv->root.rb_node;
+ p = &av->root.rb_node;
while (*p) {
int cmp_res;
parent = *p;
- seb = rb_entry(parent, struct ubi_scan_leb, u.rb);
- if (lnum != seb->lnum) {
- if (lnum < seb->lnum)
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (lnum != aeb->lnum) {
+ if (lnum < aeb->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -491,8 +490,8 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* logical eraseblock present.
*/
- dbg_bld("this LEB already exists: PEB %d, sqnum %llu, "
- "EC %d", seb->pnum, seb->sqnum, seb->ec);
+ dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
+ aeb->pnum, aeb->sqnum, aeb->ec);
/*
* Make sure that the logical eraseblocks have different
@@ -503,15 +502,15 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* sequence numbers. We still can attach these images, unless
* there is a need to distinguish between old and new
* eraseblocks, in which case we'll refuse the image in
- * 'compare_lebs()'. In other words, we attach old clean
+ * 'ubi_compare_lebs()'. In other words, we attach old clean
* images, but refuse attaching old images with duplicated
* logical eraseblocks because there was an unclean reboot.
*/
- if (seb->sqnum == sqnum && sqnum != 0) {
+ if (aeb->sqnum == sqnum && sqnum != 0) {
ubi_err("two LEBs with same sequence number %llu",
sqnum);
- ubi_dbg_dump_seb(seb, 0);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_vid_hdr(vid_hdr);
return -EINVAL;
}
@@ -519,7 +518,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* Now we have to drop the older one and preserve the newer
* one.
*/
- cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr);
+ cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
if (cmp_res < 0)
return cmp_res;
@@ -528,23 +527,26 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* This logical eraseblock is newer than the one
* found earlier.
*/
- err = validate_vid_hdr(vid_hdr, sv, pnum);
+ err = validate_vid_hdr(vid_hdr, av, pnum);
if (err)
return err;
- err = add_to_list(si, seb->pnum, seb->ec, cmp_res & 4,
- &si->erase);
+ err = add_to_list(ai, aeb->pnum, aeb->vol_id,
+ aeb->lnum, aeb->ec, cmp_res & 4,
+ &ai->erase);
if (err)
return err;
- seb->ec = ec;
- seb->pnum = pnum;
- seb->scrub = ((cmp_res & 2) || bitflips);
- seb->copy_flag = vid_hdr->copy_flag;
- seb->sqnum = sqnum;
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = ((cmp_res & 2) || bitflips);
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
- if (sv->highest_lnum == lnum)
- sv->last_data_size =
+ if (av->highest_lnum == lnum)
+ av->last_data_size =
be32_to_cpu(vid_hdr->data_size);
return 0;
@@ -553,63 +555,64 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* This logical eraseblock is older than the one found
* previously.
*/
- return add_to_list(si, pnum, ec, cmp_res & 4,
- &si->erase);
+ return add_to_list(ai, pnum, vol_id, lnum, ec,
+ cmp_res & 4, &ai->erase);
}
}
/*
* We've met this logical eraseblock for the first time, add it to the
- * scanning information.
+ * attaching information.
*/
- err = validate_vid_hdr(vid_hdr, sv, pnum);
+ err = validate_vid_hdr(vid_hdr, av, pnum);
if (err)
return err;
- seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
- if (!seb)
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
return -ENOMEM;
- seb->ec = ec;
- seb->pnum = pnum;
- seb->lnum = lnum;
- seb->scrub = bitflips;
- seb->copy_flag = vid_hdr->copy_flag;
- seb->sqnum = sqnum;
-
- if (sv->highest_lnum <= lnum) {
- sv->highest_lnum = lnum;
- sv->last_data_size = be32_to_cpu(vid_hdr->data_size);
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = bitflips;
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
+
+ if (av->highest_lnum <= lnum) {
+ av->highest_lnum = lnum;
+ av->last_data_size = be32_to_cpu(vid_hdr->data_size);
}
- sv->leb_count += 1;
- rb_link_node(&seb->u.rb, parent, p);
- rb_insert_color(&seb->u.rb, &sv->root);
+ av->leb_count += 1;
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
return 0;
}
/**
- * ubi_scan_find_sv - find volume in the scanning information.
- * @si: scanning information
+ * ubi_find_av - find volume in the attaching information.
+ * @ai: attaching information
* @vol_id: the requested volume ID
*
* This function returns a pointer to the volume description or %NULL if there
- * are no data about this volume in the scanning information.
+ * are no data about this volume in the attaching information.
*/
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
- int vol_id)
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id)
{
- struct ubi_scan_volume *sv;
- struct rb_node *p = si->volumes.rb_node;
+ struct ubi_ainf_volume *av;
+ struct rb_node *p = ai->volumes.rb_node;
while (p) {
- sv = rb_entry(p, struct ubi_scan_volume, rb);
+ av = rb_entry(p, struct ubi_ainf_volume, rb);
- if (vol_id == sv->vol_id)
- return sv;
+ if (vol_id == av->vol_id)
+ return av;
- if (vol_id > sv->vol_id)
+ if (vol_id > av->vol_id)
p = p->rb_left;
else
p = p->rb_right;
@@ -619,63 +622,34 @@ struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
}
/**
- * ubi_scan_find_seb - find LEB in the volume scanning information.
- * @sv: a pointer to the volume scanning information
- * @lnum: the requested logical eraseblock
- *
- * This function returns a pointer to the scanning logical eraseblock or %NULL
- * if there are no data about it in the scanning volume information.
+ * ubi_remove_av - delete attaching information about a volume.
+ * @ai: attaching information
+ * @av: the volume attaching information to delete
*/
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
- int lnum)
-{
- struct ubi_scan_leb *seb;
- struct rb_node *p = sv->root.rb_node;
-
- while (p) {
- seb = rb_entry(p, struct ubi_scan_leb, u.rb);
-
- if (lnum == seb->lnum)
- return seb;
-
- if (lnum > seb->lnum)
- p = p->rb_left;
- else
- p = p->rb_right;
- }
-
- return NULL;
-}
-
-/**
- * ubi_scan_rm_volume - delete scanning information about a volume.
- * @si: scanning information
- * @sv: the volume scanning information to delete
- */
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
struct rb_node *rb;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
- dbg_bld("remove scanning information about volume %d", sv->vol_id);
+ dbg_bld("remove attaching information about volume %d", av->vol_id);
- while ((rb = rb_first(&sv->root))) {
- seb = rb_entry(rb, struct ubi_scan_leb, u.rb);
- rb_erase(&seb->u.rb, &sv->root);
- list_add_tail(&seb->u.list, &si->erase);
+ while ((rb = rb_first(&av->root))) {
+ aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, &ai->erase);
}
- rb_erase(&sv->rb, &si->volumes);
- kfree(sv);
- si->vols_found -= 1;
+ rb_erase(&av->rb, &ai->volumes);
+ kfree(av);
+ ai->vols_found -= 1;
}
/**
- * ubi_scan_erase_peb - erase a physical eraseblock.
+ * early_erase_peb - erase a physical eraseblock.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @pnum: physical eraseblock number to erase;
- * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown)
+ * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
*
* This function erases physical eraseblock 'pnum', and writes the erase
* counter header to it. This function should only be used on UBI device
@@ -683,8 +657,8 @@ void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
- int pnum, int ec)
+static int early_erase_peb(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai, int pnum, int ec)
{
int err;
struct ubi_ec_hdr *ec_hdr;
@@ -716,9 +690,9 @@ out_free:
}
/**
- * ubi_scan_get_free_peb - get a free physical eraseblock.
+ * ubi_early_get_peb - get a free physical eraseblock.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns a free physical eraseblock. It is supposed to be
* called on the UBI initialization stages when the wear-leveling sub-system is
@@ -726,20 +700,20 @@ out_free:
* the lists, writes the EC header if it is needed, and removes it from the
* list.
*
- * This function returns scanning physical eraseblock information in case of
- * success and an error code in case of failure.
+ * This function returns a pointer to the "aeb" of the found free PEB in case
+ * of success and an error code in case of failure.
*/
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
{
int err = 0;
- struct ubi_scan_leb *seb, *tmp_seb;
+ struct ubi_ainf_peb *aeb, *tmp_aeb;
- if (!list_empty(&si->free)) {
- seb = list_entry(si->free.next, struct ubi_scan_leb, u.list);
- list_del(&seb->u.list);
- dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec);
- return seb;
+ if (!list_empty(&ai->free)) {
+ aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+ list_del(&aeb->u.list);
+ dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
}
/*
@@ -748,18 +722,18 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
* so forth. We don't want to take care about bad eraseblocks here -
* they'll be handled later.
*/
- list_for_each_entry_safe(seb, tmp_seb, &si->erase, u.list) {
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
- err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1);
+ err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
if (err)
continue;
- seb->ec += 1;
- list_del(&seb->u.list);
- dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec);
- return seb;
+ aeb->ec += 1;
+ list_del(&aeb->u.list);
+ dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
}
ubi_err("no free eraseblocks");
@@ -769,7 +743,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
/**
* check_corruption - check the data area of PEB.
* @ubi: UBI device description object
- * @vid_hrd: the (corrupted) VID header of this PEB
+ * @vid_hdr: the (corrupted) VID header of this PEB
* @pnum: the physical eraseblock number to check
*
* This is a helper function which is used to distinguish between VID header
@@ -789,9 +763,9 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
int err;
mutex_lock(&ubi->buf_mutex);
- memset(ubi->peb_buf1, 0x00, ubi->leb_size);
+ memset(ubi->peb_buf, 0x00, ubi->leb_size);
- err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start,
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
ubi->leb_size);
if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
/*
@@ -808,17 +782,17 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
if (err)
goto out_unlock;
- if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size))
+ if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
goto out_unlock;
- ubi_err("PEB %d contains corrupted VID header, and the data does not "
- "contain all 0xFF, this may be a non-UBI PEB or a severe VID "
- "header corruption which requires manual inspection", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- dbg_msg("hexdump of PEB %d offset %d, length %d",
- pnum, ubi->leb_start, ubi->leb_size);
+ ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+ pnum);
+ ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
+ ubi_dump_vid_hdr(vid_hdr);
+ pr_err("hexdump of PEB %d offset %d, length %d",
+ pnum, ubi->leb_start, ubi->leb_size);
ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
- ubi->peb_buf1, ubi->leb_size, 1);
+ ubi->peb_buf, ubi->leb_size, 1);
err = 1;
out_unlock:
@@ -827,19 +801,23 @@ out_unlock:
}
/**
- * process_eb - read, check UBI headers, and add them to scanning information.
+ * scan_peb - scan and process UBI headers of a PEB.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @pnum: the physical eraseblock number
+ * @vid: The volume ID of the found volume will be stored in this pointer
+ * @sqnum: The sqnum of the found volume will be stored in this pointer
*
- * This function returns a zero if the physical eraseblock was successfully
- * handled and a negative error code in case of failure.
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
*/
-static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
- int pnum)
+static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int pnum, int *vid, unsigned long long *sqnum)
{
long long uninitialized_var(ec);
- int err, bitflips = 0, vol_id, ec_err = 0;
+ int err, bitflips = 0, vol_id = -1, ec_err = 0;
dbg_bld("scan PEB %d", pnum);
@@ -848,12 +826,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
if (err < 0)
return err;
else if (err) {
- /*
- * FIXME: this is actually duty of the I/O sub-system to
- * initialize this, but MTD does not provide enough
- * information.
- */
- si->bad_peb_count += 1;
+ ai->bad_peb_count += 1;
return 0;
}
@@ -867,13 +840,13 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
bitflips = 1;
break;
case UBI_IO_FF:
- si->empty_peb_count += 1;
- return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 0,
- &si->erase);
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 0, &ai->erase);
case UBI_IO_FF_BITFLIPS:
- si->empty_peb_count += 1;
- return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 1,
- &si->erase);
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 1, &ai->erase);
case UBI_IO_BAD_HDR_EBADMSG:
case UBI_IO_BAD_HDR:
/*
@@ -882,7 +855,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
* moved and EC be re-created.
*/
ec_err = err;
- ec = UBI_SCAN_UNKNOWN_EC;
+ ec = UBI_UNKNOWN;
bitflips = 1;
break;
default:
@@ -911,7 +884,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
*/
ubi_err("erase counter overflow, max is %d",
UBI_MAX_ERASECOUNTER);
- ubi_dbg_dump_ec_hdr(ech);
+ ubi_dump_ec_hdr(ech);
return -EINVAL;
}
@@ -931,9 +904,9 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
ubi->image_seq = image_seq;
if (ubi->image_seq && image_seq &&
ubi->image_seq != image_seq) {
- ubi_err("bad image sequence number %d in PEB %d, "
- "expected %d", image_seq, pnum, ubi->image_seq);
- ubi_dbg_dump_ec_hdr(ech);
+ ubi_err("bad image sequence number %d in PEB %d, expected %d",
+ image_seq, pnum, ubi->image_seq);
+ ubi_dump_ec_hdr(ech);
return -EINVAL;
}
}
@@ -957,7 +930,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
* PEB, bit it is not marked as bad yet. This may also
* be a result of power cut during erasure.
*/
- si->maybe_bad_peb_count += 1;
+ ai->maybe_bad_peb_count += 1;
case UBI_IO_BAD_HDR:
if (ec_err)
/*
@@ -984,23 +957,27 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
return err;
else if (!err)
/* This corruption is caused by a power cut */
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
else
/* This is an unexpected corruption */
- err = add_corrupted(si, pnum, ec);
+ err = add_corrupted(ai, pnum, ec);
if (err)
return err;
goto adjust_mean_ec;
case UBI_IO_FF_BITFLIPS:
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ ec, 1, &ai->erase);
if (err)
return err;
goto adjust_mean_ec;
case UBI_IO_FF:
- if (ec_err)
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ if (ec_err || bitflips)
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
else
- err = add_to_list(si, pnum, ec, 0, &si->free);
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 0, &ai->free);
if (err)
return err;
goto adjust_mean_ec;
@@ -1011,30 +988,38 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
}
vol_id = be32_to_cpu(vidh->vol_id);
+ if (vid)
+ *vid = vol_id;
+ if (sqnum)
+ *sqnum = be64_to_cpu(vidh->sqnum);
if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
- ubi_msg("\"delete\" compatible internal volume %d:%d"
- " found, will remove it", vol_id, lnum);
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ if (vol_id != UBI_FM_SB_VOLUME_ID
+ && vol_id != UBI_FM_DATA_VOLUME_ID) {
+ ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+ vol_id, lnum);
+ }
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 1, &ai->erase);
if (err)
return err;
return 0;
case UBI_COMPAT_RO:
- ubi_msg("read-only compatible internal volume %d:%d"
- " found, switch to read-only mode",
+ ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
vol_id, lnum);
ubi->ro_mode = 1;
break;
case UBI_COMPAT_PRESERVE:
- ubi_msg("\"preserve\" compatible internal volume %d:%d"
- " found", vol_id, lnum);
- err = add_to_list(si, pnum, ec, 0, &si->alien);
+ ubi_msg("\"preserve\" compatible internal volume %d:%d found",
+ vol_id, lnum);
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 0, &ai->alien);
if (err)
return err;
return 0;
@@ -1049,40 +1034,40 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
if (ec_err)
ubi_warn("valid VID header but corrupted EC header at PEB %d",
pnum);
- err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips);
+ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
if (err)
return err;
adjust_mean_ec:
if (!ec_err) {
- si->ec_sum += ec;
- si->ec_count += 1;
- if (ec > si->max_ec)
- si->max_ec = ec;
- if (ec < si->min_ec)
- si->min_ec = ec;
+ ai->ec_sum += ec;
+ ai->ec_count += 1;
+ if (ec > ai->max_ec)
+ ai->max_ec = ec;
+ if (ec < ai->min_ec)
+ ai->min_ec = ec;
}
return 0;
}
/**
- * check_what_we_have - check what PEB were found by scanning.
+ * late_analysis - analyze the overall situation with PEB.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
- * This is a helper function which takes a look what PEBs were found by
- * scanning, and decides whether the flash is empty and should be formatted and
- * whether there are too many corrupted PEBs and we should not attach this
- * MTD device. Returns zero if we should proceed with attaching the MTD device,
- * and %-EINVAL if we should not.
+ * This is a helper function which takes a look at what PEBs we have after we
+ * gather information about all of them ("ai" is complete). It decides whether
+ * the flash is empty and should be formatted or whether there are too many
+ * corrupted PEBs and we should not attach this MTD device. Returns zero if we
+ * should proceed with attaching the MTD device, and %-EINVAL if we should not.
*/
-static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
+static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
int max_corr, peb_count;
- peb_count = ubi->peb_count - si->bad_peb_count - si->alien_peb_count;
+ peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
max_corr = peb_count / 20 ?: 8;
/*
@@ -1090,25 +1075,25 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
* unclean reboots. However, many of them may indicate some problems
* with the flash HW or driver.
*/
- if (si->corr_peb_count) {
+ if (ai->corr_peb_count) {
ubi_err("%d PEBs are corrupted and preserved",
- si->corr_peb_count);
- printk(KERN_ERR "Corrupted PEBs are:");
- list_for_each_entry(seb, &si->corr, u.list)
- printk(KERN_CONT " %d", seb->pnum);
- printk(KERN_CONT "\n");
+ ai->corr_peb_count);
+ pr_err("Corrupted PEBs are:");
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ pr_cont(" %d", aeb->pnum);
+ pr_cont("\n");
/*
* If too many PEBs are corrupted, we refuse attaching,
* otherwise, only print a warning.
*/
- if (si->corr_peb_count >= max_corr) {
+ if (ai->corr_peb_count >= max_corr) {
ubi_err("too many corrupted PEBs, refusing");
return -EINVAL;
}
}
- if (si->empty_peb_count + si->maybe_bad_peb_count == peb_count) {
+ if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
/*
* All PEBs are empty, or almost all - a couple PEBs look like
* they may be bad PEBs which were not marked as bad yet.
@@ -1124,14 +1109,13 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
* 2. Flash contains non-UBI data and we do not want to format
* it and destroy possibly important information.
*/
- if (si->maybe_bad_peb_count <= 2) {
- si->is_empty = 1;
+ if (ai->maybe_bad_peb_count <= 2) {
+ ai->is_empty = 1;
ubi_msg("empty MTD device detected");
get_random_bytes(&ubi->image_seq,
sizeof(ubi->image_seq));
} else {
- ubi_err("MTD device is not UBI-formatted and possibly "
- "contains non-UBI data - refusing it");
+ ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
return -EINVAL;
}
@@ -1141,61 +1125,137 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
}
/**
- * ubi_scan - scan an MTD device.
+ * destroy_av - free volume attaching information.
+ * @av: volume attaching information
+ * @ai: attaching information
+ *
+ * This function destroys the volume attaching information.
+ */
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *this = av->root.rb_node;
+
+ while (this) {
+ if (this->rb_left)
+ this = this->rb_left;
+ else if (this->rb_right)
+ this = this->rb_right;
+ else {
+ aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
+ this = rb_parent(this);
+ if (this) {
+ if (this->rb_left == &aeb->u.rb)
+ this->rb_left = NULL;
+ else
+ this->rb_right = NULL;
+ }
+
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ }
+ kfree(av);
+}
+
+/**
+ * destroy_ai - destroy attaching information.
+ * @ai: attaching information
+ */
+static void destroy_ai(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb, *aeb_tmp;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb;
+
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+
+ /* Destroy the volume RB-tree */
+ rb = ai->volumes.rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ av = rb_entry(rb, struct ubi_ainf_volume, rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &av->rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ destroy_av(ai, av);
+ }
+ }
+
+ if (ai->aeb_slab_cache)
+ kmem_cache_destroy(ai->aeb_slab_cache);
+
+ kfree(ai);
+}
+
+/**
+ * scan_all - scan entire MTD device.
* @ubi: UBI device description object
+ * @ai: attach info object
+ * @start: start scanning at this PEB
*
* This function does full scanning of an MTD device and returns complete
- * information about it. In case of failure, an error code is returned.
+ * information about it in the form of a "struct ubi_attach_info" object. In case
+ * of failure, an error code is returned.
*/
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
+static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int start)
{
int err, pnum;
struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb;
- struct ubi_scan_info *si;
-
- si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL);
- if (!si)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&si->corr);
- INIT_LIST_HEAD(&si->free);
- INIT_LIST_HEAD(&si->erase);
- INIT_LIST_HEAD(&si->alien);
- si->volumes = RB_ROOT;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
err = -ENOMEM;
- si->scan_leb_slab = kmem_cache_create("ubi_scan_leb_slab",
- sizeof(struct ubi_scan_leb),
- 0, 0, NULL);
- if (!si->scan_leb_slab)
- goto out_si;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
- goto out_slab;
+ return err;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
goto out_ech;
- for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ for (pnum = start; pnum < ubi->peb_count; pnum++) {
cond_resched();
dbg_gen("process PEB %d", pnum);
- err = process_eb(ubi, si, pnum);
+ err = scan_peb(ubi, ai, pnum, NULL, NULL);
if (err < 0)
goto out_vidh;
}
- dbg_msg("scanning is finished");
+ ubi_msg("scanning is finished");
/* Calculate mean erase counter */
- if (si->ec_count)
- si->mean_ec = div_u64(si->ec_sum, si->ec_count);
+ if (ai->ec_count)
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
- err = check_what_we_have(ubi, si);
+ err = late_analysis(ubi, ai);
if (err)
goto out_vidh;
@@ -1203,280 +1263,371 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
* In case of unknown erase counter we use the mean erase counter
* value.
*/
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
}
- list_for_each_entry(seb, &si->free, u.list) {
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry(aeb, &ai->free, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
}
- list_for_each_entry(seb, &si->corr, u.list)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
- list_for_each_entry(seb, &si->erase, u.list)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
- err = paranoid_check_si(ubi, si);
+ err = self_check_ai(ubi, ai);
if (err)
goto out_vidh;
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
- return si;
+ return 0;
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
-out_slab:
- kmem_cache_destroy(si->scan_leb_slab);
-out_si:
- ubi_scan_destroy_si(si);
- return ERR_PTR(err);
+ return err;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+
/**
- * destroy_sv - free the scanning volume information
- * @sv: scanning volume information
- * @si: scanning information
+ * scan_fast - try to find a fastmap and attach from it.
+ * @ubi: UBI device description object
+ * @ai: attach info object
*
- * This function destroys the volume RB-tree (@sv->root) and the scanning
- * volume information.
+ * Returns 0 on success, negative return values indicate an internal
+ * error.
+ * UBI_NO_FASTMAP denotes that no fastmap was found.
+ * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
*/
-static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- struct ubi_scan_leb *seb;
- struct rb_node *this = sv->root.rb_node;
+ int err, pnum, fm_anchor = -1;
+ unsigned long long max_sqnum = 0;
- while (this) {
- if (this->rb_left)
- this = this->rb_left;
- else if (this->rb_right)
- this = this->rb_right;
- else {
- seb = rb_entry(this, struct ubi_scan_leb, u.rb);
- this = rb_parent(this);
- if (this) {
- if (this->rb_left == &seb->u.rb)
- this->rb_left = NULL;
- else
- this->rb_right = NULL;
- }
+ err = -ENOMEM;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ goto out;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
+
+ for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+ int vol_id = -1;
+ unsigned long long sqnum = -1;
+ cond_resched();
- kmem_cache_free(si->scan_leb_slab, seb);
+ dbg_gen("process PEB %d", pnum);
+ err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
+ if (err < 0)
+ goto out_vidh;
+
+ if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
+ max_sqnum = sqnum;
+ fm_anchor = pnum;
}
}
- kfree(sv);
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+ if (fm_anchor < 0)
+ return UBI_NO_FASTMAP;
+
+ return ubi_scan_fastmap(ubi, ai, fm_anchor);
+
+out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+ kfree(ech);
+out:
+ return err;
+}
+
+#endif
+
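+/**
+ * alloc_ai - allocate and initialize the attaching information object.
+ * @slab_name: name to give the &struct ubi_ainf_peb slab cache
+ *
+ * Returns the newly allocated and initialized object in case of success and
+ * %NULL in case of failure.
+ */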
+static struct ubi_attach_info *alloc_ai(const char *slab_name)
+{
+ struct ubi_attach_info *ai;
+
+ ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+ if (!ai)
+ return ai;
+
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+ ai->aeb_slab_cache = kmem_cache_create(slab_name,
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache) {
+ kfree(ai);
+ ai = NULL;
+ }
+
+ return ai;
}
/**
- * ubi_scan_destroy_si - destroy scanning information.
- * @si: scanning information
+ * ubi_attach - attach an MTD device.
+ * @ubi: UBI device descriptor
+ * @force_scan: if set to non-zero attach by scanning
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
*/
-void ubi_scan_destroy_si(struct ubi_scan_info *si)
+int ubi_attach(struct ubi_device *ubi, int force_scan)
{
- struct ubi_scan_leb *seb, *seb_tmp;
- struct ubi_scan_volume *sv;
- struct rb_node *rb;
+ int err;
+ struct ubi_attach_info *ai;
- list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
- }
- list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
- }
- list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
+ ai = alloc_ai("ubi_aeb_slab_cache");
+ if (!ai)
+ return -ENOMEM;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* On small flash devices we disable fastmap in any case. */
+ if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
+ ubi->fm_disabled = 1;
+ force_scan = 1;
}
- list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
+
+ if (force_scan)
+ err = scan_all(ubi, ai, 0);
+ else {
+ err = scan_fast(ubi, ai);
+ if (err > 0) {
+ if (err != UBI_NO_FASTMAP) {
+ destroy_ai(ai);
+ ai = alloc_ai("ubi_aeb_slab_cache2");
+ if (!ai)
+ return -ENOMEM;
+ }
+
+ err = scan_all(ubi, ai, UBI_FM_MAX_START);
+ }
}
+#else
+ err = scan_all(ubi, ai, 0);
+#endif
+ if (err)
+ goto out_ai;
- /* Destroy the volume RB-tree */
- rb = si->volumes.rb_node;
- while (rb) {
- if (rb->rb_left)
- rb = rb->rb_left;
- else if (rb->rb_right)
- rb = rb->rb_right;
- else {
- sv = rb_entry(rb, struct ubi_scan_volume, rb);
+ ubi->bad_peb_count = ai->bad_peb_count;
+ ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+ ubi->corr_peb_count = ai->corr_peb_count;
+ ubi->max_ec = ai->max_ec;
+ ubi->mean_ec = ai->mean_ec;
+ dbg_gen("max. sequence number: %llu", ai->max_sqnum);
- rb = rb_parent(rb);
- if (rb) {
- if (rb->rb_left == &sv->rb)
- rb->rb_left = NULL;
- else
- rb->rb_right = NULL;
- }
+ err = ubi_read_volume_table(ubi, ai);
+ if (err)
+ goto out_ai;
+
+ err = ubi_wl_init(ubi, ai);
+ if (err)
+ goto out_vtbl;
+
+ err = ubi_eba_init(ubi, ai);
+ if (err)
+ goto out_wl;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
+ struct ubi_attach_info *scan_ai;
- destroy_sv(si, sv);
+ scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
+ if (!scan_ai) {
+ err = -ENOMEM;
+ goto out_wl;
+ }
+
+ err = scan_all(ubi, scan_ai, 0);
+ if (err) {
+ destroy_ai(scan_ai);
+ goto out_wl;
}
+
+ err = self_check_eba(ubi, ai, scan_ai);
+ destroy_ai(scan_ai);
+
+ if (err)
+ goto out_wl;
}
+#endif
- kmem_cache_destroy(si->scan_leb_slab);
- kfree(si);
-}
+ destroy_ai(ai);
+ return 0;
-#ifdef CONFIG_MTD_UBI_DEBUG
+out_wl:
+ ubi_wl_close(ubi);
+out_vtbl:
+ ubi_free_internal_volumes(ubi);
+ vfree(ubi->vtbl);
+out_ai:
+ destroy_ai(ai);
+ return err;
+}
/**
- * paranoid_check_si - check the scanning information.
+ * self_check_ai - check the attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
- * This function returns zero if the scanning information is all right, and a
+ * This function returns zero if the attaching information is all right, and a
* negative error code if not or if an error occurred.
*/
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int pnum, err, vols_found = 0;
struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb, *last_seb;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *last_aeb;
uint8_t *buf;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
/*
- * At first, check that scanning information is OK.
+ * At first, check that attaching information is OK.
*/
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
int leb_count = 0;
cond_resched();
vols_found += 1;
- if (si->is_empty) {
+ if (ai->is_empty) {
ubi_err("bad is_empty flag");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->vol_id < 0 || sv->highest_lnum < 0 ||
- sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 ||
- sv->data_pad < 0 || sv->last_data_size < 0) {
+ if (av->vol_id < 0 || av->highest_lnum < 0 ||
+ av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
+ av->data_pad < 0 || av->last_data_size < 0) {
ubi_err("negative values");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->vol_id >= UBI_MAX_VOLUMES &&
- sv->vol_id < UBI_INTERNAL_VOL_START) {
+ if (av->vol_id >= UBI_MAX_VOLUMES &&
+ av->vol_id < UBI_INTERNAL_VOL_START) {
ubi_err("bad vol_id");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->vol_id > si->highest_vol_id) {
+ if (av->vol_id > ai->highest_vol_id) {
ubi_err("highest_vol_id is %d, but vol_id %d is there",
- si->highest_vol_id, sv->vol_id);
+ ai->highest_vol_id, av->vol_id);
goto out;
}
- if (sv->vol_type != UBI_DYNAMIC_VOLUME &&
- sv->vol_type != UBI_STATIC_VOLUME) {
+ if (av->vol_type != UBI_DYNAMIC_VOLUME &&
+ av->vol_type != UBI_STATIC_VOLUME) {
ubi_err("bad vol_type");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->data_pad > ubi->leb_size / 2) {
+ if (av->data_pad > ubi->leb_size / 2) {
ubi_err("bad data_pad");
- goto bad_sv;
+ goto bad_av;
}
- last_seb = NULL;
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
cond_resched();
- last_seb = seb;
+ last_aeb = aeb;
leb_count += 1;
- if (seb->pnum < 0 || seb->ec < 0) {
+ if (aeb->pnum < 0 || aeb->ec < 0) {
ubi_err("negative values");
- goto bad_seb;
+ goto bad_aeb;
}
- if (seb->ec < si->min_ec) {
- ubi_err("bad si->min_ec (%d), %d found",
- si->min_ec, seb->ec);
- goto bad_seb;
+ if (aeb->ec < ai->min_ec) {
+ ubi_err("bad ai->min_ec (%d), %d found",
+ ai->min_ec, aeb->ec);
+ goto bad_aeb;
}
- if (seb->ec > si->max_ec) {
- ubi_err("bad si->max_ec (%d), %d found",
- si->max_ec, seb->ec);
- goto bad_seb;
+ if (aeb->ec > ai->max_ec) {
+ ubi_err("bad ai->max_ec (%d), %d found",
+ ai->max_ec, aeb->ec);
+ goto bad_aeb;
}
- if (seb->pnum >= ubi->peb_count) {
+ if (aeb->pnum >= ubi->peb_count) {
ubi_err("too high PEB number %d, total PEBs %d",
- seb->pnum, ubi->peb_count);
- goto bad_seb;
+ aeb->pnum, ubi->peb_count);
+ goto bad_aeb;
}
- if (sv->vol_type == UBI_STATIC_VOLUME) {
- if (seb->lnum >= sv->used_ebs) {
+ if (av->vol_type == UBI_STATIC_VOLUME) {
+ if (aeb->lnum >= av->used_ebs) {
ubi_err("bad lnum or used_ebs");
- goto bad_seb;
+ goto bad_aeb;
}
} else {
- if (sv->used_ebs != 0) {
+ if (av->used_ebs != 0) {
ubi_err("non-zero used_ebs");
- goto bad_seb;
+ goto bad_aeb;
}
}
- if (seb->lnum > sv->highest_lnum) {
+ if (aeb->lnum > av->highest_lnum) {
ubi_err("incorrect highest_lnum or lnum");
- goto bad_seb;
+ goto bad_aeb;
}
}
- if (sv->leb_count != leb_count) {
+ if (av->leb_count != leb_count) {
ubi_err("bad leb_count, %d objects in the tree",
leb_count);
- goto bad_sv;
+ goto bad_av;
}
- if (!last_seb)
+ if (!last_aeb)
continue;
- seb = last_seb;
+ aeb = last_aeb;
- if (seb->lnum != sv->highest_lnum) {
+ if (aeb->lnum != av->highest_lnum) {
ubi_err("bad highest_lnum");
- goto bad_seb;
+ goto bad_aeb;
}
}
- if (vols_found != si->vols_found) {
- ubi_err("bad si->vols_found %d, should be %d",
- si->vols_found, vols_found);
+ if (vols_found != ai->vols_found) {
+ ubi_err("bad ai->vols_found %d, should be %d",
+ ai->vols_found, vols_found);
goto out;
}
- /* Check that scanning information is correct */
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- last_seb = NULL;
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ /* Check that attaching information is correct */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
int vol_type;
cond_resched();
- last_seb = seb;
+ last_aeb = aeb;
- err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1);
+ err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
if (err && err != UBI_IO_BITFLIPS) {
ubi_err("VID header is not OK (%d)", err);
if (err > 0)
@@ -1486,52 +1637,52 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
- if (sv->vol_type != vol_type) {
+ if (av->vol_type != vol_type) {
ubi_err("bad vol_type");
goto bad_vid_hdr;
}
- if (seb->sqnum != be64_to_cpu(vidh->sqnum)) {
- ubi_err("bad sqnum %llu", seb->sqnum);
+ if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
+ ubi_err("bad sqnum %llu", aeb->sqnum);
goto bad_vid_hdr;
}
- if (sv->vol_id != be32_to_cpu(vidh->vol_id)) {
- ubi_err("bad vol_id %d", sv->vol_id);
+ if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
+ ubi_err("bad vol_id %d", av->vol_id);
goto bad_vid_hdr;
}
- if (sv->compat != vidh->compat) {
+ if (av->compat != vidh->compat) {
ubi_err("bad compat %d", vidh->compat);
goto bad_vid_hdr;
}
- if (seb->lnum != be32_to_cpu(vidh->lnum)) {
- ubi_err("bad lnum %d", seb->lnum);
+ if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err("bad lnum %d", aeb->lnum);
goto bad_vid_hdr;
}
- if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) {
- ubi_err("bad used_ebs %d", sv->used_ebs);
+ if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
+ ubi_err("bad used_ebs %d", av->used_ebs);
goto bad_vid_hdr;
}
- if (sv->data_pad != be32_to_cpu(vidh->data_pad)) {
- ubi_err("bad data_pad %d", sv->data_pad);
+ if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
+ ubi_err("bad data_pad %d", av->data_pad);
goto bad_vid_hdr;
}
}
- if (!last_seb)
+ if (!last_aeb)
continue;
- if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) {
- ubi_err("bad highest_lnum %d", sv->highest_lnum);
+ if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err("bad highest_lnum %d", av->highest_lnum);
goto bad_vid_hdr;
}
- if (sv->last_data_size != be32_to_cpu(vidh->data_size)) {
- ubi_err("bad last_data_size %d", sv->last_data_size);
+ if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
+ ubi_err("bad last_data_size %d", av->last_data_size);
goto bad_vid_hdr;
}
}
@@ -1553,21 +1704,21 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
buf[pnum] = 1;
}
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
- buf[seb->pnum] = 1;
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->free, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->free, u.list)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->corr, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->erase, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->alien, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->alien, u.list)
+ buf[aeb->pnum] = 1;
err = 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++)
@@ -1581,25 +1732,23 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
goto out;
return 0;
-bad_seb:
- ubi_err("bad scanning information about LEB %d", seb->lnum);
- ubi_dbg_dump_seb(seb, 0);
- ubi_dbg_dump_sv(sv);
+bad_aeb:
+ ubi_err("bad attaching information about LEB %d", aeb->lnum);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_av(av);
goto out;
-bad_sv:
- ubi_err("bad scanning information about volume %d", sv->vol_id);
- ubi_dbg_dump_sv(sv);
+bad_av:
+ ubi_err("bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
goto out;
bad_vid_hdr:
- ubi_err("bad scanning information about volume %d", sv->vol_id);
- ubi_dbg_dump_sv(sv);
- ubi_dbg_dump_vid_hdr(vidh);
+ ubi_err("bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
+ ubi_dump_vid_hdr(vidh);
out:
- ubi_dbg_dump_stack();
+ dump_stack();
return -EINVAL;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 6c3fb5ab20f5..939018c27cb7 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -27,10 +27,6 @@
* module load parameters or the kernel boot parameters. If MTD devices were
* specified, UBI does not attach any MTD device, but it is possible to do
* later using the "UBI control device".
- *
- * At the moment we only attach UBI devices by scanning, which will become a
- * bottleneck when flashes reach certain large size. Then one may improve UBI
- * and add other methods, although it does not seem to be easy to do.
*/
#include <linux/err.h>
@@ -40,6 +36,7 @@
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
+#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
@@ -49,6 +46,12 @@
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
+/* Maximum number of comma-separated items in the 'mtd=' parameter */
+#define MTD_PARAM_MAX_COUNT 4
+
+/* Maximum value for the number of bad PEBs per 1024 PEBs */
+#define MAX_MTD_UBI_BEB_LIMIT 768
+
#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
@@ -60,10 +63,13 @@
* @name: MTD character device node path, MTD device name, or MTD device number
* string
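+ * @ubi_num: UBI device number to assign to the newly created UBI device
+ * (%UBI_DEV_NUM_AUTO by default)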
* @vid_hdr_offs: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*/
struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
+ int ubi_num;
int vid_hdr_offs;
+ int max_beb_per1024;
};
/* Numbers of elements set in the @mtd_dev_param array */
@@ -71,7 +77,10 @@ static int __initdata mtd_devs;
/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
-
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/* UBI module parameter to enable fastmap automatically on non-fastmap images */
+static bool fm_autoconvert;
+#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;
@@ -148,6 +157,19 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
ubi_do_get_device_info(ubi, &nt.di);
ubi_do_get_volume_info(ubi, vol, &nt.vi);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ switch (ntype) {
+ case UBI_VOLUME_ADDED:
+ case UBI_VOLUME_REMOVED:
+ case UBI_VOLUME_RESIZED:
+ case UBI_VOLUME_RENAMED:
+ if (ubi_update_fastmap(ubi)) {
+ ubi_err("Unable to update fastmap!");
+ ubi_ro_mode(ubi);
+ }
+ }
+#endif
return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}
@@ -554,10 +576,10 @@ static void uif_close(struct ubi_device *ubi)
}
/**
- * free_internal_volumes - free internal volumes.
+ * ubi_free_internal_volumes - free internal volumes.
* @ubi: UBI device description object
*/
-static void free_internal_volumes(struct ubi_device *ubi)
+void ubi_free_internal_volumes(struct ubi_device *ubi)
{
int i;
@@ -568,62 +590,38 @@ static void free_internal_volumes(struct ubi_device *ubi)
}
}
-/**
- * attach_by_scanning - attach an MTD device using scanning method.
- * @ubi: UBI device descriptor
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- *
- * Note, currently this is the only method to attach UBI devices. Hopefully in
- * the future we'll have more scalable attaching methods and avoid full media
- * scanning. But even in this case scanning will be needed as a fall-back
- * attaching method if there are some on-flash table corruptions.
- */
-static int attach_by_scanning(struct ubi_device *ubi)
+static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
- int err;
- struct ubi_scan_info *si;
-
- si = ubi_scan(ubi);
- if (IS_ERR(si))
- return PTR_ERR(si);
-
- ubi->bad_peb_count = si->bad_peb_count;
- ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
- ubi->corr_peb_count = si->corr_peb_count;
- ubi->max_ec = si->max_ec;
- ubi->mean_ec = si->mean_ec;
- ubi_msg("max. sequence number: %llu", si->max_sqnum);
+ int limit, device_pebs;
+ uint64_t device_size;
- err = ubi_read_volume_table(ubi, si);
- if (err)
- goto out_si;
-
- err = ubi_wl_init_scan(ubi, si);
- if (err)
- goto out_vtbl;
+ if (!max_beb_per1024)
+ return 0;
- err = ubi_eba_init_scan(ubi, si);
- if (err)
- goto out_wl;
+ /*
+ * Here we are using size of the entire flash chip and
+ * not just the MTD partition size because the maximum
+ * number of bad eraseblocks is a percentage of the
+ * whole device and bad eraseblocks are not fairly
+ * distributed over the flash chip. So the worst case
+ * is that all the bad eraseblocks of the chip are in
+ * the MTD partition we are attaching (ubi->mtd).
+ */
+ device_size = mtd_get_device_size(ubi->mtd);
+ device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
+ limit = mult_frac(device_pebs, max_beb_per1024, 1024);
- ubi_scan_destroy_si(si);
- return 0;
+ /* Round it up */
+ if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
+ limit += 1;
-out_wl:
- ubi_wl_close(ubi);
-out_vtbl:
- free_internal_volumes(ubi);
- vfree(ubi->vtbl);
-out_si:
- ubi_scan_destroy_si(si);
- return err;
+ return limit;
}
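As a quick sanity check of the arithmetic above (illustrative numbers only, not part of this patch): a chip with 4096 PEBs attached with max_beb_per1024 = 20 gets limit = 4096 * 20 / 1024 = 80, and the round-up does not trigger because 80 * 1024 / 20 equals the PEB count exactly. A minimal userspace sketch of the same computation:

#include <stdio.h>

int main(void)
{
	long long device_pebs = 4096, max_beb_per1024 = 20;	/* illustrative values */
	long long limit = device_pebs * max_beb_per1024 / 1024;

	/* Round up, mirroring the mult_frac() check in get_bad_peb_limit() */
	if (limit * 1024 / max_beb_per1024 < device_pebs)
		limit += 1;

	printf("bad PEB limit: %lld\n", limit);	/* prints 80 */
	return 0;
}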
/**
* io_init - initialize I/O sub-system for a given UBI device.
* @ubi: UBI device description object
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*
* If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
* assumed:
@@ -636,8 +634,11 @@ out_si:
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int io_init(struct ubi_device *ubi)
+static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
+ dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+ dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
if (ubi->mtd->numeraseregions != 0) {
/*
* Some flashes have several erase regions. Different regions
@@ -664,8 +665,10 @@ static int io_init(struct ubi_device *ubi)
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi->flash_size = ubi->mtd->size;
- if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
+ if (ubi->mtd->block_isbad && ubi->mtd->block_markbad) {
ubi->bad_allowed = 1;
+ ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
+ }
if (ubi->mtd->type == MTD_NORFLASH) {
ubi_assert(ubi->mtd->writesize == 1);
@@ -707,11 +710,11 @@ static int io_init(struct ubi_device *ubi)
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
- dbg_msg("min_io_size %d", ubi->min_io_size);
- dbg_msg("max_write_size %d", ubi->max_write_size);
- dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
- dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
- dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
+ dbg_gen("min_io_size %d", ubi->min_io_size);
+ dbg_gen("max_write_size %d", ubi->max_write_size);
+ dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+ dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
+ dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
if (ubi->vid_hdr_offset == 0)
/* Default offset */
@@ -728,10 +731,10 @@ static int io_init(struct ubi_device *ubi)
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
- dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
- dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
- dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
- dbg_msg("leb_start %d", ubi->leb_start);
+ dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
+ dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+ dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
+ dbg_gen("leb_start %d", ubi->leb_start);
/* The shift must be aligned to 32-bit boundary */
if (ubi->vid_hdr_shift % 4) {
@@ -757,7 +760,7 @@ static int io_init(struct ubi_device *ubi)
ubi->max_erroneous = ubi->peb_count / 10;
if (ubi->max_erroneous < 16)
ubi->max_erroneous = 16;
- dbg_msg("max_erroneous %d", ubi->max_erroneous);
+ dbg_gen("max_erroneous %d", ubi->max_erroneous);
/*
* It may happen that EC and VID headers are situated in one minimal
@@ -765,36 +768,24 @@ static int io_init(struct ubi_device *ubi)
* read-only mode.
*/
if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
- ubi_warn("EC and VID headers are in the same minimal I/O unit, "
- "switch to read-only mode");
+ ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
ubi->ro_mode = 1;
}
ubi->leb_size = ubi->peb_size - ubi->leb_start;
if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
- ubi_msg("MTD device %d is write-protected, attach in "
- "read-only mode", ubi->mtd->index);
+ ubi_msg("MTD device %d is write-protected, attach in read-only mode",
+ ubi->mtd->index);
ubi->ro_mode = 1;
}
- ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
- ubi->peb_size, ubi->peb_size >> 10);
- ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
- ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
- if (ubi->hdrs_min_io_size != ubi->min_io_size)
- ubi_msg("sub-page size: %d",
- ubi->hdrs_min_io_size);
- ubi_msg("VID header offset: %d (aligned %d)",
- ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
- ubi_msg("data offset: %d", ubi->leb_start);
-
/*
- * Note, ideally, we have to initialize ubi->bad_peb_count here. But
+ * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
* unfortunately, MTD does not provide this information. We should loop
* over all physical eraseblocks and invoke mtd->block_is_bad() for
- * each physical eraseblock. So, we skip ubi->bad_peb_count
- * uninitialized and initialize it after scanning.
+ * each physical eraseblock. So, we leave @ubi->bad_peb_count
+ * uninitialized so far.
*/
return 0;
@@ -805,7 +796,7 @@ static int io_init(struct ubi_device *ubi)
* @ubi: UBI device description object
* @vol_id: ID of the volume to re-size
*
- * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
+ * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
* the volume table to the largest possible size. See comments in ubi-header.h
* for more description of the flag. Returns zero in case of success and a
* negative error code in case of failure.
@@ -816,6 +807,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
struct ubi_volume *vol = ubi->volumes[vol_id];
int err, old_reserved_pebs = vol->reserved_pebs;
+ if (ubi->ro_mode) {
+ ubi_warn("skip auto-resize because of R/O mode");
+ return 0;
+ }
+
/*
* Clear the auto-resize flag in the volume in-memory copy of the
* volume table, and 'ubi_resize_volume()' will propagate this change
@@ -830,8 +826,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* No available PEBs to re-size the volume, clear the flag on
* flash and exit.
*/
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
ubi_err("cannot clean auto-resize flag for volume %d",
@@ -857,6 +852,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
* @vid_hdr_offset: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*
* This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
* to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -867,11 +863,18 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* Note, the invocations of this function has to be serialized by the
* @ubi_devices_mutex.
*/
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024)
{
struct ubi_device *ubi;
int i, err, ref = 0;
+ if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
+ return -EINVAL;
+
+ if (!max_beb_per1024)
+ max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
+
/*
* Check if we already have the same MTD device attached.
*
@@ -881,7 +884,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && mtd->index == ubi->mtd->index) {
- dbg_err("mtd%d is already attached to ubi%d",
+ ubi_err("mtd%d is already attached to ubi%d",
mtd->index, i);
return -EEXIST;
}
@@ -896,8 +899,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
* no sense to attach emulated MTD devices, so we prohibit this.
*/
if (mtd->type == MTD_UBIVOLUME) {
- ubi_err("refuse attaching mtd%d - it is already emulated on "
- "top of UBI", mtd->index);
+ ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
+ mtd->index);
return -EINVAL;
}
@@ -907,7 +910,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi_devices[ubi_num])
break;
if (ubi_num == UBI_MAX_DEVICES) {
- dbg_err("only %d UBI devices may be created",
+ ubi_err("only %d UBI devices may be created",
UBI_MAX_DEVICES);
return -ENFILE;
}
@@ -917,7 +920,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
- dbg_err("ubi%d already exists", ubi_num);
+ ubi_err("ubi%d already exists", ubi_num);
return -EEXIST;
}
}
@@ -931,36 +934,62 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
ubi->vid_hdr_offset = vid_hdr_offset;
ubi->autoresize_vol_id = -1;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_pool.used = ubi->fm_pool.size = 0;
+ ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
+
+ /*
+ * fm_pool.max_size is 5% of the total number of PEBs but it's also
+ * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
+ */
+ ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
+ ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
+ if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
+ ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
+
+ ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
+ ubi->fm_disabled = !fm_autoconvert;
+
+ if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
+ <= UBI_FM_MAX_START) {
+ ubi_err("More than %i PEBs are needed for fastmap, sorry.",
+ UBI_FM_MAX_START);
+ ubi->fm_disabled = 1;
+ }
+
+ ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+#else
+ ubi->fm_disabled = 1;
+#endif
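A minimal sketch (not part of this patch) of the fm_pool sizing above: 5% of the total PEB count, clamped between UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE, whose concrete values live in ubi.h; the helper name below is purely illustrative.

static int fm_pool_size_sketch(int total_pebs, int min_sz, int max_sz)
{
	int sz = (total_pebs / 100) * 5;	/* 5% of all PEBs */

	if (sz > max_sz)	/* cap at UBI_FM_MAX_POOL_SIZE */
		sz = max_sz;
	if (sz < min_sz)	/* but never below UBI_FM_MIN_POOL_SIZE */
		sz = min_sz;
	return sz;
}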
mutex_init(&ubi->buf_mutex);
mutex_init(&ubi->ckvol_mutex);
mutex_init(&ubi->device_mutex);
spin_lock_init(&ubi->volumes_lock);
+ mutex_init(&ubi->fm_mutex);
+ init_rwsem(&ubi->fm_sem);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
- dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
- dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
- err = io_init(ubi);
+ err = io_init(ubi, max_beb_per1024);
if (err)
goto out_free;
err = -ENOMEM;
- ubi->peb_buf1 = vmalloc(ubi->peb_size);
- if (!ubi->peb_buf1)
+ ubi->peb_buf = vmalloc(ubi->peb_size);
+ if (!ubi->peb_buf)
goto out_free;
- ubi->peb_buf2 = vmalloc(ubi->peb_size);
- if (!ubi->peb_buf2)
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_size = ubi_calc_fm_size(ubi);
+ ubi->fm_buf = vzalloc(ubi->fm_size);
+ if (!ubi->fm_buf)
goto out_free;
-
- err = ubi_debugging_init_dev(ubi);
- if (err)
- goto out_free;
-
- err = attach_by_scanning(ubi);
+#endif
+ err = ubi_attach(ubi, 0);
if (err) {
- dbg_err("failed to attach by scanning, error %d", err);
- goto out_debugging;
+ ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
+ goto out_free;
}
if (ubi->autoresize_vol_id != -1) {
@@ -985,23 +1014,24 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
goto out_debugfs;
}
- ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
- ubi_msg("MTD device name: \"%s\"", mtd->name);
- ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
- ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
- ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
- ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count);
- ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
- ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
- ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
- ubi_msg("number of user volumes: %d",
- ubi->vol_count - UBI_INT_VOL_COUNT);
- ubi_msg("available PEBs: %d", ubi->avail_pebs);
- ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
- ubi_msg("number of PEBs reserved for bad PEB handling: %d",
- ubi->beb_rsvd_pebs);
- ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
- ubi_msg("image sequence number: %d", ubi->image_seq);
+ ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
+ mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
+ ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+ ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
+ ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
+ ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
+ ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
+ ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
+ ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+ ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
+ ubi_msg("user volumes: %d, internal volumes: %d, max. volumes count: %d",
+ ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
+ ubi->vtbl_slots);
+ ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+ ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
+ ubi->image_seq);
+ ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+ ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
/*
* The below lock makes sure we do not race with 'ubi_thread()' which
@@ -1024,13 +1054,11 @@ out_uif:
uif_close(ubi);
out_detach:
ubi_wl_close(ubi);
- free_internal_volumes(ubi);
+ ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
-out_debugging:
- ubi_debugging_exit_dev(ubi);
out_free:
- vfree(ubi->peb_buf1);
- vfree(ubi->peb_buf2);
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
if (ref)
put_device(&ubi->dev);
else
@@ -1079,8 +1107,12 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_assert(ubi_num == ubi->ubi_num);
ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
- dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
-
+ ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* If we don't write a new fastmap at detach time we lose all
+ * EC updates that have been made since the last written fastmap. */
+ ubi_update_fastmap(ubi);
+#endif
/*
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
@@ -1096,13 +1128,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
+
ubi_wl_close(ubi);
- free_internal_volumes(ubi);
+ ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
- ubi_debugging_exit_dev(ubi);
- vfree(ubi->peb_buf1);
- vfree(ubi->peb_buf2);
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
put_device(&ubi->dev);
return 0;
@@ -1230,12 +1262,16 @@ static int __init ubi_init(void)
mtd = open_mtd_device(p->name);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
- goto out_detach;
+ ubi_err("cannot open mtd %s, error %d", p->name, err);
+ /* See the comment below regarding ubi_is_module(). */
+ if (ubi_is_module())
+ goto out_detach;
+ continue;
}
mutex_lock(&ubi_devices_mutex);
- err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
- p->vid_hdr_offs);
+ err = ubi_attach_mtd_dev(mtd, p->ubi_num,
+ p->vid_hdr_offs, p->max_beb_per1024);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
ubi_err("cannot attach mtd%d", mtd->index);
@@ -1278,10 +1314,10 @@ out_version:
out_class:
class_destroy(ubi_class);
out:
- ubi_err("UBI error: cannot initialize UBI, error %d", err);
+ ubi_err("cannot initialize UBI, error %d", err);
return err;
}
-module_init(ubi_init);
+late_initcall(ubi_init);
static void __exit ubi_exit(void)
{
@@ -1315,8 +1351,7 @@ static int __init bytes_str_to_int(const char *str)
result = simple_strtoul(str, &endp, 0);
if (str == endp || result >= INT_MAX) {
- printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
- str);
+ ubi_err("incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
@@ -1332,8 +1367,7 @@ static int __init bytes_str_to_int(const char *str)
case '\0':
break;
default:
- printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
- str);
+ ubi_err("incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
@@ -1354,27 +1388,26 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
struct mtd_dev_param *p;
char buf[MTD_PARAM_LEN_MAX];
char *pbuf = &buf[0];
- char *tokens[2] = {NULL, NULL};
+ char *tokens[MTD_PARAM_MAX_COUNT], *token;
if (!val)
return -EINVAL;
if (mtd_devs == UBI_MAX_DEVICES) {
- printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
- UBI_MAX_DEVICES);
+ ubi_err("too many parameters, max. is %d\n",
+ UBI_MAX_DEVICES);
return -EINVAL;
}
len = strnlen(val, MTD_PARAM_LEN_MAX);
if (len == MTD_PARAM_LEN_MAX) {
- printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
- "max. is %d\n", val, MTD_PARAM_LEN_MAX);
+ ubi_err("parameter \"%s\" is too long, max. is %d\n",
+ val, MTD_PARAM_LEN_MAX);
return -EINVAL;
}
if (len == 0) {
- printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
- "ignored\n");
+ pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
return 0;
}
@@ -1384,42 +1417,69 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
if (buf[len - 1] == '\n')
buf[len - 1] = '\0';
- for (i = 0; i < 2; i++)
+ for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
tokens[i] = strsep(&pbuf, ",");
if (pbuf) {
- printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
- val);
+ ubi_err("too many arguments at \"%s\"\n", val);
return -EINVAL;
}
p = &mtd_dev_param[mtd_devs];
strcpy(&p->name[0], tokens[0]);
- if (tokens[1])
- p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
+ token = tokens[1];
+ if (token) {
+ p->vid_hdr_offs = bytes_str_to_int(token);
- if (p->vid_hdr_offs < 0)
- return p->vid_hdr_offs;
+ if (p->vid_hdr_offs < 0)
+ return p->vid_hdr_offs;
+ }
+
+ token = tokens[2];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->max_beb_per1024);
+
+ if (err) {
+ ubi_err("bad value for max_beb_per1024 parameter: %s",
+ token);
+ return -EINVAL;
+ }
+ }
+
+ token = tokens[3];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->ubi_num);
+
+ if (err) {
+ ubi_err("bad value for ubi_num parameter: %s", token);
+ return -EINVAL;
+ }
+ } else
+ p->ubi_num = UBI_DEV_NUM_AUTO;
mtd_devs += 1;
return 0;
}
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
-MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
- "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
+MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
"Multiple \"mtd\" parameters may be specified.\n"
- "MTD devices may be specified by their number, name, or "
- "path to the MTD character device node.\n"
- "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
- "header position to be used by UBI.\n"
- "Example 1: mtd=/dev/mtd0 - attach MTD device "
- "/dev/mtd0.\n"
- "Example 2: mtd=content,1984 mtd=4 - attach MTD device "
- "with name \"content\" using VID header offset 1984, and "
- "MTD device number 4 with default VID header offset.");
-
+ "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
+ "Optional \"vid_hdr_offs\" parameter specifies the UBI VID header position to be used by UBI (the default offset is used if 0).\n"
+ "Optional \"max_beb_per1024\" parameter specifies the maximum expected number of bad eraseblocks per 1024 eraseblocks (the default value ("
+ __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") is used if 0).\n"
+ "Optional \"ubi_num\" parameter specifies the UBI device number which has to be assigned to the newly created UBI device (assigned automatically by default).\n"
+ "\n"
+ "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
+ "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
+ "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using the default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
+ "\t(e.g. if the NAND *chipset* has 4096 PEBs, 100 will be reserved for this UBI device).\n"
+ "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5, using default values for the other fields.");
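For instance (device names and numbers are illustrative only), Example 4 above corresponds to loading the module with

	modprobe ubi mtd=/dev/mtd1,0,0,5

or, for a built-in UBI, passing ubi.mtd=/dev/mtd1,0,0,5 on the kernel command line; both attach /dev/mtd1 with the default VID header offset and bad-PEB limit and register it as ubi5.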
+#ifdef CONFIG_MTD_UBI_FASTMAP
+module_param(fm_autoconvert, bool, 0644);
+MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 3320a50ba4f0..dfcc65b33e99 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -63,7 +63,7 @@ static int get_exclusive(struct ubi_volume_desc *desc)
users = vol->readers + vol->writers + vol->exclusive;
ubi_assert(users > 0);
if (users > 1) {
- dbg_err("%d users for volume %d", users, vol->vol_id);
+ ubi_err("%d users for volume %d", users, vol->vol_id);
err = -EBUSY;
} else {
vol->readers = vol->writers = 0;
@@ -140,9 +140,9 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
vol->updating = 0;
vfree(vol->upd_buf);
} else if (vol->changing_leb) {
- dbg_gen("only %lld of %lld bytes received for atomic LEB change"
- " for volume %d:%d, cancel", vol->upd_received,
- vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
+ dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
+ vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
+ vol->vol_id);
vol->changing_leb = 0;
vfree(vol->upd_buf);
}
@@ -159,7 +159,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
if (vol->updating) {
/* Update is in progress, seeking is prohibited */
- dbg_err("updating");
+ ubi_err("updating");
return -EBUSY;
}
@@ -178,7 +178,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
}
if (new_offset < 0 || new_offset > vol->used_bytes) {
- dbg_err("bad seek %lld", new_offset);
+ ubi_err("bad seek %lld", new_offset);
return -EINVAL;
}
@@ -189,7 +189,8 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
return new_offset;
}
-static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
+ int datasync)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_device *ubi = desc->vol->ubi;
@@ -216,11 +217,11 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
count, *offp, vol->vol_id);
if (vol->updating) {
- dbg_err("updating");
+ ubi_err("updating");
return -EBUSY;
}
if (vol->upd_marker) {
- dbg_err("damaged volume, update marker is set");
+ ubi_err("damaged volume, update marker is set");
return -EBADF;
}
if (*offp == vol->used_bytes || count == 0)
@@ -300,7 +301,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
if (off & (ubi->min_io_size - 1)) {
- dbg_err("unaligned position");
+ ubi_err("unaligned position");
return -EINVAL;
}
@@ -309,7 +310,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
/* We can write only in fractions of the minimum I/O unit */
if (count & (ubi->min_io_size - 1)) {
- dbg_err("unaligned write length");
+ ubi_err("unaligned write length");
return -EINVAL;
}
@@ -334,8 +335,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
break;
}
- err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
- UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
if (err)
break;
@@ -477,9 +477,6 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
req.bytes < 0 || req.lnum >= vol->usable_leb_size)
break;
- if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
- req.dtype != UBI_UNKNOWN)
- break;
err = get_exclusive(desc);
if (err < 0)
@@ -518,7 +515,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
if (err)
break;
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
break;
}
@@ -532,7 +529,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
err = -EFAULT;
break;
}
- err = ubi_leb_map(desc, req.lnum, req.dtype);
+ err = ubi_leb_map(desc, req.lnum);
break;
}
@@ -632,6 +629,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
if (req->alignment != 1 && n)
goto bad;
+ if (!req->name[0] || !req->name_len)
+ goto bad;
+
if (req->name_len > UBI_VOL_NAME_MAX) {
err = -ENAMETOOLONG;
goto bad;
@@ -644,8 +644,8 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
return 0;
bad:
- dbg_err("bad volume creation request");
- ubi_dbg_dump_mkvol_req(req);
+ ubi_err("bad volume creation request");
+ ubi_dump_mkvol_req(req);
return err;
}
@@ -710,12 +710,12 @@ static int rename_volumes(struct ubi_device *ubi,
for (i = 0; i < req->count - 1; i++) {
for (n = i + 1; n < req->count; n++) {
if (req->ents[i].vol_id == req->ents[n].vol_id) {
- dbg_err("duplicated volume id %d",
+ ubi_err("duplicated volume id %d",
req->ents[i].vol_id);
return -EINVAL;
}
if (!strcmp(req->ents[i].name, req->ents[n].name)) {
- dbg_err("duplicated volume name \"%s\"",
+ ubi_err("duplicated volume name \"%s\"",
req->ents[i].name);
return -EINVAL;
}
@@ -738,7 +738,7 @@ static int rename_volumes(struct ubi_device *ubi,
re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
if (IS_ERR(re->desc)) {
err = PTR_ERR(re->desc);
- dbg_err("cannot open volume %d, error %d", vol_id, err);
+ ubi_err("cannot open volume %d, error %d", vol_id, err);
kfree(re);
goto out_free;
}
@@ -754,7 +754,7 @@ static int rename_volumes(struct ubi_device *ubi,
re->new_name_len = name_len;
memcpy(re->new_name, name, name_len);
list_add_tail(&re->list, &rename_list);
- dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
+ dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
vol_id, re->desc->vol->name, name);
}
@@ -797,7 +797,7 @@ static int rename_volumes(struct ubi_device *ubi,
continue;
/* The volume exists but busy, or an error occurred */
- dbg_err("cannot open volume \"%s\", error %d",
+ ubi_err("cannot open volume \"%s\", error %d",
re->new_name, err);
goto out_free;
}
@@ -812,7 +812,7 @@ static int rename_volumes(struct ubi_device *ubi,
re1->remove = 1;
re1->desc = desc;
list_add(&re1->list, &rename_list);
- dbg_msg("will remove volume %d, name \"%s\"",
+ dbg_gen("will remove volume %d, name \"%s\"",
re1->desc->vol->vol_id, re1->desc->vol->name);
}
@@ -943,7 +943,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
{
struct ubi_rnvol_req *req;
- dbg_msg("re-name volumes");
+ dbg_gen("re-name volumes");
req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
if (!req) {
err = -ENOMEM;
@@ -1011,7 +1011,8 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
* 'ubi_attach_mtd_dev()'.
*/
mutex_lock(&ubi_devices_mutex);
- err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
+ err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
+ req.max_beb_per1024);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
@@ -1027,7 +1028,7 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
{
int ubi_num;
- dbg_gen("dettach MTD device");
+ dbg_gen("detach MTD device");
err = get_user(ubi_num, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index ab80c0debac8..790bca4f9283 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -18,243 +18,203 @@
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
-/*
- * Here we keep all the UBI debugging stuff which should normally be disabled
- * and compiled-out, but it is extremely helpful when hunting bugs or doing big
- * changes.
- */
-
-#ifdef CONFIG_MTD_UBI_DEBUG
-
#include "ubi.h"
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+
/**
- * ubi_dbg_dump_ec_hdr - dump an erase counter header.
+ * ubi_dump_flash - dump a region of flash.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to dump
+ * @offset: the starting offset within the physical eraseblock to dump
+ * @len: the length of the region to dump
+ */
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+ int err;
+ size_t read;
+ void *buf;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ buf = vmalloc(len);
+ if (!buf)
+ return;
+ err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ if (err && err != -EUCLEAN) {
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
+ goto out;
+ }
+
+ ubi_msg("dumping %d bytes of data from PEB %d, offset %d",
+ len, pnum, offset);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+out:
+ vfree(buf);
+ return;
+}
+
+/**
+ * ubi_dump_ec_hdr - dump an erase counter header.
* @ec_hdr: the erase counter header to dump
*/
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
{
- printk(KERN_DEBUG "Erase counter header dump:\n");
- printk(KERN_DEBUG "\tmagic %#08x\n",
- be32_to_cpu(ec_hdr->magic));
- printk(KERN_DEBUG "\tversion %d\n", (int)ec_hdr->version);
- printk(KERN_DEBUG "\tec %llu\n",
- (long long)be64_to_cpu(ec_hdr->ec));
- printk(KERN_DEBUG "\tvid_hdr_offset %d\n",
- be32_to_cpu(ec_hdr->vid_hdr_offset));
- printk(KERN_DEBUG "\tdata_offset %d\n",
- be32_to_cpu(ec_hdr->data_offset));
- printk(KERN_DEBUG "\timage_seq %d\n",
- be32_to_cpu(ec_hdr->image_seq));
- printk(KERN_DEBUG "\thdr_crc %#08x\n",
- be32_to_cpu(ec_hdr->hdr_crc));
- printk(KERN_DEBUG "erase counter header hexdump:\n");
+ pr_err("Erase counter header dump:\n");
+ pr_err("\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic));
+ pr_err("\tversion %d\n", (int)ec_hdr->version);
+ pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
+ pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
+ pr_err("\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset));
+ pr_err("\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq));
+ pr_err("\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
+ pr_err("erase counter header hexdump:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ec_hdr, UBI_EC_HDR_SIZE, 1);
}
/**
- * ubi_dbg_dump_vid_hdr - dump a volume identifier header.
+ * ubi_dump_vid_hdr - dump a volume identifier header.
* @vid_hdr: the volume identifier header to dump
*/
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
{
- printk(KERN_DEBUG "Volume identifier header dump:\n");
- printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
- printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version);
- printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type);
- printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
- printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat);
- printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
- printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
- printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
- printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
- printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
- printk(KERN_DEBUG "\tsqnum %llu\n",
+ pr_err("Volume identifier header dump:\n");
+ pr_err("\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
+ pr_err("\tversion %d\n", (int)vid_hdr->version);
+ pr_err("\tvol_type %d\n", (int)vid_hdr->vol_type);
+ pr_err("\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
+ pr_err("\tcompat %d\n", (int)vid_hdr->compat);
+ pr_err("\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
+ pr_err("\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
+ pr_err("\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
+ pr_err("\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
+ pr_err("\tsqnum %llu\n",
(unsigned long long)be64_to_cpu(vid_hdr->sqnum));
- printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
- printk(KERN_DEBUG "Volume identifier header hexdump:\n");
+ pr_err("\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
+ pr_err("Volume identifier header hexdump:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
vid_hdr, UBI_VID_HDR_SIZE, 1);
}
/**
- * ubi_dbg_dump_vol_info- dump volume information.
+ * ubi_dump_vol_info - dump volume information.
* @vol: UBI volume description object
*/
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
+void ubi_dump_vol_info(const struct ubi_volume *vol)
{
- printk(KERN_DEBUG "Volume information dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", vol->vol_id);
- printk(KERN_DEBUG "\treserved_pebs %d\n", vol->reserved_pebs);
- printk(KERN_DEBUG "\talignment %d\n", vol->alignment);
- printk(KERN_DEBUG "\tdata_pad %d\n", vol->data_pad);
- printk(KERN_DEBUG "\tvol_type %d\n", vol->vol_type);
- printk(KERN_DEBUG "\tname_len %d\n", vol->name_len);
- printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size);
- printk(KERN_DEBUG "\tused_ebs %d\n", vol->used_ebs);
- printk(KERN_DEBUG "\tused_bytes %lld\n", vol->used_bytes);
- printk(KERN_DEBUG "\tlast_eb_bytes %d\n", vol->last_eb_bytes);
- printk(KERN_DEBUG "\tcorrupted %d\n", vol->corrupted);
- printk(KERN_DEBUG "\tupd_marker %d\n", vol->upd_marker);
+ pr_err("Volume information dump:\n");
+ pr_err("\tvol_id %d\n", vol->vol_id);
+ pr_err("\treserved_pebs %d\n", vol->reserved_pebs);
+ pr_err("\talignment %d\n", vol->alignment);
+ pr_err("\tdata_pad %d\n", vol->data_pad);
+ pr_err("\tvol_type %d\n", vol->vol_type);
+ pr_err("\tname_len %d\n", vol->name_len);
+ pr_err("\tusable_leb_size %d\n", vol->usable_leb_size);
+ pr_err("\tused_ebs %d\n", vol->used_ebs);
+ pr_err("\tused_bytes %lld\n", vol->used_bytes);
+ pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
+ pr_err("\tcorrupted %d\n", vol->corrupted);
+ pr_err("\tupd_marker %d\n", vol->upd_marker);
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
- printk(KERN_DEBUG "\tname %s\n", vol->name);
+ pr_err("\tname %s\n", vol->name);
} else {
- printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
vol->name[0], vol->name[1], vol->name[2],
vol->name[3], vol->name[4]);
}
}
/**
- * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
* @r: the object to dump
* @idx: volume table index
*/
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
{
int name_len = be16_to_cpu(r->name_len);
- printk(KERN_DEBUG "Volume table record %d dump:\n", idx);
- printk(KERN_DEBUG "\treserved_pebs %d\n",
- be32_to_cpu(r->reserved_pebs));
- printk(KERN_DEBUG "\talignment %d\n", be32_to_cpu(r->alignment));
- printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(r->data_pad));
- printk(KERN_DEBUG "\tvol_type %d\n", (int)r->vol_type);
- printk(KERN_DEBUG "\tupd_marker %d\n", (int)r->upd_marker);
- printk(KERN_DEBUG "\tname_len %d\n", name_len);
+ pr_err("Volume table record %d dump:\n", idx);
+ pr_err("\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs));
+ pr_err("\talignment %d\n", be32_to_cpu(r->alignment));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(r->data_pad));
+ pr_err("\tvol_type %d\n", (int)r->vol_type);
+ pr_err("\tupd_marker %d\n", (int)r->upd_marker);
+ pr_err("\tname_len %d\n", name_len);
if (r->name[0] == '\0') {
- printk(KERN_DEBUG "\tname NULL\n");
+ pr_err("\tname NULL\n");
return;
}
if (name_len <= UBI_VOL_NAME_MAX &&
strnlen(&r->name[0], name_len + 1) == name_len) {
- printk(KERN_DEBUG "\tname %s\n", &r->name[0]);
+ pr_err("\tname %s\n", &r->name[0]);
} else {
- printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
r->name[0], r->name[1], r->name[2], r->name[3],
r->name[4]);
}
- printk(KERN_DEBUG "\tcrc %#08x\n", be32_to_cpu(r->crc));
+ pr_err("\tcrc %#08x\n", be32_to_cpu(r->crc));
}
/**
- * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object.
- * @sv: the object to dump
+ * ubi_dump_av - dump a &struct ubi_ainf_volume object.
+ * @av: the object to dump
*/
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
+void ubi_dump_av(const struct ubi_ainf_volume *av)
{
- printk(KERN_DEBUG "Volume scanning information dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", sv->vol_id);
- printk(KERN_DEBUG "\thighest_lnum %d\n", sv->highest_lnum);
- printk(KERN_DEBUG "\tleb_count %d\n", sv->leb_count);
- printk(KERN_DEBUG "\tcompat %d\n", sv->compat);
- printk(KERN_DEBUG "\tvol_type %d\n", sv->vol_type);
- printk(KERN_DEBUG "\tused_ebs %d\n", sv->used_ebs);
- printk(KERN_DEBUG "\tlast_data_size %d\n", sv->last_data_size);
- printk(KERN_DEBUG "\tdata_pad %d\n", sv->data_pad);
+ pr_err("Volume attaching information dump:\n");
+ pr_err("\tvol_id %d\n", av->vol_id);
+ pr_err("\thighest_lnum %d\n", av->highest_lnum);
+ pr_err("\tleb_count %d\n", av->leb_count);
+ pr_err("\tcompat %d\n", av->compat);
+ pr_err("\tvol_type %d\n", av->vol_type);
+ pr_err("\tused_ebs %d\n", av->used_ebs);
+ pr_err("\tlast_data_size %d\n", av->last_data_size);
+ pr_err("\tdata_pad %d\n", av->data_pad);
}
/**
- * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object.
- * @seb: the object to dump
+ * ubi_dump_aeb - dump a &struct ubi_ainf_peb object.
+ * @aeb: the object to dump
* @type: object type: 0 - not corrupted, 1 - corrupted
*/
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type)
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
{
- printk(KERN_DEBUG "eraseblock scanning information dump:\n");
- printk(KERN_DEBUG "\tec %d\n", seb->ec);
- printk(KERN_DEBUG "\tpnum %d\n", seb->pnum);
+ pr_err("eraseblock attaching information dump:\n");
+ pr_err("\tec %d\n", aeb->ec);
+ pr_err("\tpnum %d\n", aeb->pnum);
if (type == 0) {
- printk(KERN_DEBUG "\tlnum %d\n", seb->lnum);
- printk(KERN_DEBUG "\tscrub %d\n", seb->scrub);
- printk(KERN_DEBUG "\tsqnum %llu\n", seb->sqnum);
+ pr_err("\tlnum %d\n", aeb->lnum);
+ pr_err("\tscrub %d\n", aeb->scrub);
+ pr_err("\tsqnum %llu\n", aeb->sqnum);
}
}
/**
- * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
+ * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
* @req: the object to dump
*/
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
{
char nm[17];
- printk(KERN_DEBUG "Volume creation request dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", req->vol_id);
- printk(KERN_DEBUG "\talignment %d\n", req->alignment);
- printk(KERN_DEBUG "\tbytes %lld\n", (long long)req->bytes);
- printk(KERN_DEBUG "\tvol_type %d\n", req->vol_type);
- printk(KERN_DEBUG "\tname_len %d\n", req->name_len);
+ pr_err("Volume creation request dump:\n");
+ pr_err("\tvol_id %d\n", req->vol_id);
+ pr_err("\talignment %d\n", req->alignment);
+ pr_err("\tbytes %lld\n", (long long)req->bytes);
+ pr_err("\tvol_type %d\n", req->vol_type);
+ pr_err("\tname_len %d\n", req->name_len);
memcpy(nm, req->name, 16);
nm[16] = 0;
- printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm);
-}
-
-/**
- * ubi_dbg_dump_flash - dump a region of flash.
- * @ubi: UBI device description object
- * @pnum: the physical eraseblock number to dump
- * @offset: the starting offset within the physical eraseblock to dump
- * @len: the length of the region to dump
- */
-void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
-{
- int err;
- size_t read;
- void *buf;
- loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
-
- buf = vmalloc(len);
- if (!buf)
- return;
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
- if (err && err != -EUCLEAN) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offset, read);
- goto out;
- }
-
- dbg_msg("dumping %d bytes of data from PEB %d, offset %d",
- len, pnum, offset);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
-out:
- vfree(buf);
- return;
-}
-
-/**
- * ubi_debugging_init_dev - initialize debugging for an UBI device.
- * @ubi: UBI device description object
- *
- * This function initializes debugging-related data for UBI device @ubi.
- * Returns zero in case of success and a negative error code in case of
- * failure.
- */
-int ubi_debugging_init_dev(struct ubi_device *ubi)
-{
- ubi->dbg = kzalloc(sizeof(struct ubi_debug_info), GFP_KERNEL);
- if (!ubi->dbg)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * ubi_debugging_exit_dev - free debugging data for an UBI device.
- * @ubi: UBI device description object
- */
-void ubi_debugging_exit_dev(struct ubi_device *ubi)
-{
- kfree(ubi->dbg);
+ pr_err("\t1st 16 characters of name: %s\n", nm);
}
/*
@@ -271,6 +231,9 @@ static struct dentry *dfs_rootdir;
*/
int ubi_debugfs_init(void)
{
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
dfs_rootdir = debugfs_create_dir("ubi", NULL);
if (IS_ERR_OR_NULL(dfs_rootdir)) {
int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -288,7 +251,8 @@ int ubi_debugfs_init(void)
*/
void ubi_debugfs_exit(void)
{
- debugfs_remove(dfs_rootdir);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove(dfs_rootdir);
}
/* Read an UBI debugfs file */
@@ -305,7 +269,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
- d = ubi->dbg;
+ d = &ubi->dbg;
if (dent == d->dfs_chk_gen)
val = d->chk_gen;
@@ -351,7 +315,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
- d = ubi->dbg;
+ d = &ubi->dbg;
buf_size = min_t(size_t, count, (sizeof(buf) - 1));
if (copy_from_user(buf, user_buf, buf_size)) {
@@ -416,7 +380,10 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
unsigned long ubi_num = ubi->ubi_num;
const char *fname;
struct dentry *dent;
- struct ubi_debug_info *d = ubi->dbg;
+ struct ubi_debug_info *d = &ubi->dbg;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
ubi->ubi_num);
@@ -485,7 +452,6 @@ out:
*/
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
{
- debugfs_remove_recursive(ubi->dbg->dfs_dir);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ubi->dbg.dfs_dir);
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 64fbb0021825..33f8f3b2c9b2 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -21,29 +21,27 @@
#ifndef __UBI_DEBUG_H__
#define __UBI_DEBUG_H__
-#ifdef CONFIG_MTD_UBI_DEBUG
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
+
#include <linux/random.h>
#define ubi_assert(expr) do { \
if (unlikely(!(expr))) { \
- printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
+ pr_crit("UBI assert failed in %s at %u (pid %d)\n", \
__func__, __LINE__, current->pid); \
- ubi_dbg_dump_stack(); \
+ dump_stack(); \
} \
} while (0)
-#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
-
-#define ubi_dbg_dump_stack() dump_stack()
-
-#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
+#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
print_hex_dump(l, ps, pt, r, g, b, len, a)
#define ubi_dbg_msg(type, fmt, ...) \
- pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
+ pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid, \
+ ##__VA_ARGS__)
-/* Just a debugging messages not related to any specific UBI subsystem */
-#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
/* General debugging messages */
#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
/* Messages from the eraseblock association sub-system */
@@ -55,62 +53,18 @@
/* Initialization and build messages */
#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
-void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
-int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
-int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
- int offset, int len);
-int ubi_debugging_init_dev(struct ubi_device *ubi);
-void ubi_debugging_exit_dev(struct ubi_device *ubi);
+void ubi_dump_vol_info(const struct ubi_volume *vol);
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
+void ubi_dump_av(const struct ubi_ainf_volume *av);
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+ int len);
int ubi_debugfs_init(void);
void ubi_debugfs_exit(void);
int ubi_debugfs_init_dev(struct ubi_device *ubi);
void ubi_debugfs_exit_dev(struct ubi_device *ubi);
-/*
- * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
- * + 2 for the number plus 1 for the trailing zero byte.
- */
-#define UBI_DFS_DIR_NAME "ubi%d"
-#define UBI_DFS_DIR_LEN (3 + 2 + 1)
-
-/**
- * struct ubi_debug_info - debugging information for an UBI device.
- *
- * @chk_gen: if UBI general extra checks are enabled
- * @chk_io: if UBI I/O extra checks are enabled
- * @disable_bgt: disable the background task for testing purposes
- * @emulate_bitflips: emulate bit-flips for testing purposes
- * @emulate_io_failures: emulate write/erase failures for testing purposes
- * @dfs_dir_name: name of debugfs directory containing files of this UBI device
- * @dfs_dir: direntry object of the UBI device debugfs directory
- * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
- * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
- * @dfs_disable_bgt: debugfs knob to disable the background task
- * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
- * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
- */
-struct ubi_debug_info {
- unsigned int chk_gen:1;
- unsigned int chk_io:1;
- unsigned int disable_bgt:1;
- unsigned int emulate_bitflips:1;
- unsigned int emulate_io_failures:1;
- char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
- struct dentry *dfs_dir;
- struct dentry *dfs_chk_gen;
- struct dentry *dfs_chk_io;
- struct dentry *dfs_disable_bgt;
- struct dentry *dfs_emulate_bitflips;
- struct dentry *dfs_emulate_io_failures;
-};
-
/**
* ubi_dbg_is_bgt_disabled - if the background thread is disabled.
* @ubi: UBI device description object
@@ -120,7 +74,7 @@ struct ubi_debug_info {
*/
static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
{
- return ubi->dbg->disable_bgt;
+ return ubi->dbg.disable_bgt;
}
/**
@@ -131,7 +85,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_bitflips)
+ if (ubi->dbg.emulate_bitflips)
return !(random32() % 200);
return 0;
}
@@ -145,7 +99,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_io_failures)
+ if (ubi->dbg.emulate_io_failures)
return !(random32() % 500);
return 0;
}
@@ -159,78 +113,18 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
*/
static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg->emulate_io_failures)
+ if (ubi->dbg.emulate_io_failures)
return !(random32() % 400);
return 0;
}
-#else
-
-/* Use "if (0)" to make compiler check arguments even if debugging is off */
-#define ubi_assert(expr) do { \
- if (0) { \
- printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
- __func__, __LINE__, current->pid); \
- } \
-} while (0)
-
-#define dbg_err(fmt, ...) do { \
- if (0) \
- ubi_err(fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define ubi_dbg_msg(fmt, ...) do { \
- if (0) \
- printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
-} while (0)
-
-#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_gen(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_eba(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_wl(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_io(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_bld(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-
-static inline void ubi_dbg_dump_stack(void) { return; }
-static inline void
-ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) { return; }
-static inline void
-ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) { return; }
-static inline void
-ubi_dbg_dump_vol_info(const struct ubi_volume *vol) { return; }
-static inline void
-ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) { return; }
-static inline void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) { return; }
-static inline void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb,
- int type) { return; }
-static inline void
-ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) { return; }
-static inline void ubi_dbg_dump_flash(struct ubi_device *ubi,
- int pnum, int offset, int len) { return; }
-static inline void
-ubi_dbg_print_hex_dump(const char *l, const char *ps, int pt, int r,
- int g, const void *b, size_t len, bool a) { return; }
-static inline int ubi_dbg_check_all_ff(struct ubi_device *ubi,
- int pnum, int offset,
- int len) { return 0; }
-static inline int ubi_dbg_check_write(struct ubi_device *ubi,
- const void *buf, int pnum,
- int offset, int len) { return 0; }
-
-static inline int ubi_debugging_init_dev(struct ubi_device *ubi) { return 0; }
-static inline void ubi_debugging_exit_dev(struct ubi_device *ubi) { return; }
-static inline int ubi_debugfs_init(void) { return 0; }
-static inline void ubi_debugfs_exit(void) { return; }
-static inline int ubi_debugfs_init_dev(struct ubi_device *ubi) { return 0; }
-static inline void ubi_debugfs_exit_dev(struct ubi_device *ubi) { return; }
-
-static inline int
-ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi) { return 0; }
-static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi) { return 0; }
-static inline int
-ubi_dbg_is_write_failure(const struct ubi_device *ubi) { return 0; }
-static inline int
-ubi_dbg_is_erase_failure(const struct ubi_device *ubi) { return 0; }
+static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_io;
+}
-#endif /* !CONFIG_MTD_UBI_DEBUG */
+static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_gen;
+}
#endif /* !__UBI_DEBUG_H__ */
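[Editorial aside] With the CONFIG_MTD_UBI_DEBUG split gone, the dump helpers and self-checks are always compiled in and gated at runtime through the debugfs knobs read by ubi_dbg_chk_gen()/ubi_dbg_chk_io() above. A minimal sketch of how a call site is expected to use them; the function name here is illustrative, not part of this patch:

	/* Hypothetical self-check call site, assuming the runtime-knob
	 * convention introduced above. */
	static int self_check_write(struct ubi_device *ubi, const void *buf,
				    int pnum, int offset, int len)
	{
		if (!ubi_dbg_chk_io(ubi))
			return 0;	/* I/O checks disabled via debugfs */

		/* ... read the data back and compare it against @buf ... */
		return 0;
	}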
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index c696c9481c95..9538380f6959 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -57,7 +57,7 @@
* global sequence counter value. It also increases the global sequence
* counter.
*/
-static unsigned long long next_sqnum(struct ubi_device *ubi)
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
unsigned long long sqnum;
@@ -340,8 +340,10 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
- err = ubi_wl_put_peb(ubi, pnum, 0);
+ up_read(&ubi->fm_sem);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
out_unlock:
leb_write_unlock(ubi, vol_id, lnum);
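[Editorial aside] From here on the patch brackets every eba_tbl[] update with down_read(&ubi->fm_sem)/up_read(&ubi->fm_sem). The assumed intent (fm_sem itself and its write-side user come from other patches in this series) is that the fastmap writer takes the semaphore for writing so it observes a consistent EBA table, while the read side nests freely. The recurring pattern, as a sketch:

	/* Assumed locking convention; readers are the EBA updates in this
	 * file, the writer is the fastmap writeout elsewhere in the series. */
	down_read(&ubi->fm_sem);
	vol->eba_tbl[lnum] = pnum;	/* or UBI_LEB_UNMAPPED */
	up_read(&ubi->fm_sem);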
@@ -420,9 +422,8 @@ retry:
*/
if (err == UBI_IO_BAD_HDR_EBADMSG ||
err == UBI_IO_BAD_HDR) {
- ubi_warn("corrupted VID header at PEB "
- "%d, LEB %d:%d", pnum, vol_id,
- lnum);
+ ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
+ pnum, vol_id, lnum);
err = -EBADMSG;
} else
ubi_ro_mode(ubi);
@@ -507,7 +508,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
return -ENOMEM;
retry:
- new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
+ new_pnum = ubi_wl_get_peb(ubi);
if (new_pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
return new_pnum;
@@ -522,25 +523,25 @@ retry:
goto out_put;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
if (err)
goto write_error;
data_size = offset + len;
mutex_lock(&ubi->buf_mutex);
- memset(ubi->peb_buf1 + offset, 0xFF, len);
+ memset(ubi->peb_buf + offset, 0xFF, len);
/* Read everything before the area where the write failure happened */
if (offset > 0) {
- err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS)
goto out_unlock;
}
- memcpy(ubi->peb_buf1 + offset, buf, len);
+ memcpy(ubi->peb_buf + offset, buf, len);
- err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
+ err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
if (err) {
mutex_unlock(&ubi->buf_mutex);
goto write_error;
@@ -549,8 +550,10 @@ retry:
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = new_pnum;
- ubi_wl_put_peb(ubi, pnum, 1);
+ up_read(&ubi->fm_sem);
+ ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg("data was successfully recovered");
return 0;
@@ -558,7 +561,7 @@ retry:
out_unlock:
mutex_unlock(&ubi->buf_mutex);
out_put:
- ubi_wl_put_peb(ubi, new_pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -568,7 +571,7 @@ write_error:
* get another one.
*/
ubi_warn("failed to write to PEB %d", new_pnum);
- ubi_wl_put_peb(ubi, new_pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
if (++tries > UBI_IO_RETRIES) {
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -585,7 +588,6 @@ write_error:
* @buf: the data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
- * @dtype: data type
*
* This function writes data to logical eraseblock @lnum of a dynamic volume
* @vol. Returns zero in case of success and a negative error code in case
@@ -593,7 +595,7 @@ write_error:
* written to the flash media, but may be some garbage.
*/
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
- const void *buf, int offset, int len, int dtype)
+ const void *buf, int offset, int len)
{
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -634,14 +636,14 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
@@ -661,14 +663,15 @@ retry:
if (len) {
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
- ubi_warn("failed to write %d bytes at offset %d of "
- "LEB %d:%d, PEB %d", len, offset, vol_id,
- lnum, pnum);
+ ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
goto write_error;
}
}
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -687,7 +690,7 @@ write_error:
* eraseblock, so just put it and request a new one. We assume that if
* this physical eraseblock went bad, the erase code will handle that.
*/
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
@@ -695,7 +698,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -707,7 +710,6 @@ write_error:
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
- * @dtype: data type
* @used_ebs: how many logical eraseblocks will this volume contain
*
* This function writes data to logical eraseblock @lnum of static volume
@@ -724,8 +726,7 @@ write_error:
* code in case of failure.
*/
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype,
- int used_ebs)
+ int lnum, const void *buf, int len, int used_ebs)
{
int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -750,7 +751,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -763,7 +764,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
@@ -788,7 +789,9 @@ retry:
}
ubi_assert(vol->eba_tbl[lnum] < 0);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -807,7 +810,7 @@ write_error:
return err;
}
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
@@ -815,7 +818,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -827,7 +830,6 @@ write_error:
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
- * @dtype: data type
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
@@ -839,7 +841,7 @@ write_error:
* LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
*/
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype)
+ int lnum, const void *buf, int len)
{
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -856,7 +858,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
return err;
- return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -868,7 +870,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
if (err)
goto out_mutex;
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -881,7 +883,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
err = pnum;
goto out_leb_unlock;
@@ -905,12 +907,14 @@ retry:
}
if (vol->eba_tbl[lnum] >= 0) {
- err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
if (err)
goto out_leb_unlock;
}
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_sem);
out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum);
@@ -930,13 +934,13 @@ write_error:
goto out_leb_unlock;
}
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
goto out_leb_unlock;
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -979,7 +983,7 @@ static int is_error_sane(int err)
* physical eraseblock @to. The @vid_hdr buffer may be changed by this
* function. Returns:
* o %0 in case of success;
- * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_CANCEL_BITFLIPS, etc;
+ * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
* o a negative error code in case of failure.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
@@ -1044,22 +1048,21 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* cancel it.
*/
if (vol->eba_tbl[lnum] != from) {
- dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
- "PEB %d, cancel", vol_id, lnum, from,
- vol->eba_tbl[lnum]);
+ dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
+ vol_id, lnum, from, vol->eba_tbl[lnum]);
err = MOVE_CANCEL_RACE;
goto out_unlock_leb;
}
/*
* OK, now the LEB is locked and we can safely start moving it. Since
- * this function utilizes the @ubi->peb_buf1 buffer which is shared
+ * this function utilizes the @ubi->peb_buf buffer which is shared
* with some other functions - we lock the buffer by taking the
* @ubi->buf_mutex.
*/
mutex_lock(&ubi->buf_mutex);
dbg_wl("read %d bytes of data", aldata_size);
- err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
if (err && err != UBI_IO_BITFLIPS) {
ubi_warn("error %d while reading data from PEB %d",
err, from);
@@ -1079,10 +1082,10 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
*/
if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
aldata_size = data_size =
- ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);
+ ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
cond_resched();
- crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
cond_resched();
/*
@@ -1096,7 +1099,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->data_crc = cpu_to_be32(crc);
}
- vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err) {
@@ -1111,17 +1114,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
if (err) {
if (err != UBI_IO_BITFLIPS) {
- ubi_warn("error %d while reading VID header back from "
- "PEB %d", err, to);
+ ubi_warn("error %d while reading VID header back from PEB %d",
+ err, to);
if (is_error_sane(err))
err = MOVE_TARGET_RD_ERR;
} else
- err = MOVE_CANCEL_BITFLIPS;
+ err = MOVE_TARGET_BITFLIPS;
goto out_unlock_buf;
}
if (data_size > 0) {
- err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
+ err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
if (err) {
if (err == -EIO)
err = MOVE_TARGET_WR_ERR;
@@ -1134,31 +1137,33 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* We've written the data and are going to read it back to make
* sure it was written correctly.
*/
-
- err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
+ memset(ubi->peb_buf, 0xFF, aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
if (err) {
if (err != UBI_IO_BITFLIPS) {
- ubi_warn("error %d while reading data back "
- "from PEB %d", err, to);
+ ubi_warn("error %d while reading data back from PEB %d",
+ err, to);
if (is_error_sane(err))
err = MOVE_TARGET_RD_ERR;
} else
- err = MOVE_CANCEL_BITFLIPS;
+ err = MOVE_TARGET_BITFLIPS;
goto out_unlock_buf;
}
cond_resched();
- if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
- ubi_warn("read data back from PEB %d and it is "
- "different", to);
+ if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
+ ubi_warn("read data back from PEB %d and it is different",
+ to);
err = -EINVAL;
goto out_unlock_buf;
}
}
ubi_assert(vol->eba_tbl[lnum] == from);
+ down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = to;
+ up_read(&ubi->fm_sem);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
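[Editorial aside] The read-back verification above no longer keeps a second copy in peb_buf2; the target PEB is re-read into the same buffer and the CRC computed before the write is compared against a CRC of what came back. A simplified sketch of the idea, using names from the surrounding code:

	/* Simplified verify-after-write sketch. */
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);	/* before write */
	/* ... write ubi->peb_buf to the target PEB, then read it back ... */
	if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size))
		err = -EINVAL;	/* contents differ */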
@@ -1171,7 +1176,7 @@ out_unlock_leb:
* print_rsvd_warning - warn about not having enough reserved PEBs.
* @ubi: UBI device description object
*
- * This is a helper function for 'ubi_eba_init_scan()' which is called when UBI
+ * This is a helper function for 'ubi_eba_init()' which is called when UBI
* cannot reserve enough PEBs for bad block handling. This function makes a
* decision whether we have to print a warning or not. The algorithm is as
* follows:
@@ -1186,13 +1191,13 @@ out_unlock_leb:
* reported by real users.
*/
static void print_rsvd_warning(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+ struct ubi_attach_info *ai)
{
/*
* The 1 << 18 (256KiB) number is picked randomly, just a reasonably
* large number to distinguish between newly flashed and used images.
*/
- if (si->max_sqnum > (1 << 18)) {
+ if (ai->max_sqnum > (1 << 18)) {
int min = ubi->beb_rsvd_level / 10;
if (!min)
@@ -1201,27 +1206,123 @@ static void print_rsvd_warning(struct ubi_device *ubi,
return;
}
- ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d,"
- " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+ ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+ ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
if (ubi->corr_peb_count)
ubi_warn("%d PEBs are corrupted and not used",
- ubi->corr_peb_count);
+ ubi->corr_peb_count);
+}
+
+/**
+ * self_check_eba - run a self check on the EBA table constructed by fastmap.
+ * @ubi: UBI device description object
+ * @ai_fastmap: UBI attach info object created by fastmap
+ * @ai_scan: UBI attach info object created by scanning
+ *
+ * Returns < 0 in case of an internal error, 0 otherwise.
+ * If a bad EBA table entry was found it will be printed out and
+ * ubi_assert() triggers.
+ */
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan)
+{
+ int i, j, num_volumes, ret = 0;
+ int **scan_eba, **fm_eba;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *rb;
+
+ num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
+ if (!scan_eba)
+ return -ENOMEM;
+
+ fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
+ if (!fm_eba) {
+ kfree(scan_eba);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_volumes; i++) {
+ vol = ubi->volumes[i];
+ if (!vol)
+ continue;
+
+ scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
+ GFP_KERNEL);
+ if (!scan_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
+ GFP_KERNEL);
+ if (!fm_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
+
+ av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ scan_eba[i][aeb->lnum] = aeb->pnum;
+
+ av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ fm_eba[i][aeb->lnum] = aeb->pnum;
+
+ for (j = 0; j < vol->reserved_pebs; j++) {
+ if (scan_eba[i][j] != fm_eba[i][j]) {
+ if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
+ fm_eba[i][j] == UBI_LEB_UNMAPPED)
+ continue;
+
+ ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
+ vol->vol_id, i, fm_eba[i][j],
+ scan_eba[i][j]);
+ ubi_assert(0);
+ }
+ }
+ }
+
+out_free:
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+
+ kfree(scan_eba[i]);
+ kfree(fm_eba[i]);
+ }
+
+ kfree(scan_eba);
+ kfree(fm_eba);
+ return ret;
}
/**
- * ubi_eba_init_scan - initialize the EBA sub-system using scanning information.
+ * ubi_eba_init - initialize the EBA sub-system using attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, j, err, num_volumes;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
struct rb_node *rb;
dbg_eba("initialize EBA sub-system");
@@ -1230,7 +1331,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
mutex_init(&ubi->alc_mutex);
ubi->ltree = RB_ROOT;
- ubi->global_sqnum = si->max_sqnum + 1;
+ ubi->global_sqnum = ai->max_sqnum + 1;
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
for (i = 0; i < num_volumes; i++) {
@@ -1250,18 +1351,18 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
for (j = 0; j < vol->reserved_pebs; j++)
vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
- sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
- if (!sv)
+ av = ubi_find_av(ai, idx2vol_id(ubi, i));
+ if (!av)
continue;
- ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- if (seb->lnum >= vol->reserved_pebs)
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ if (aeb->lnum >= vol->reserved_pebs)
/*
* This may happen in case of an unclean reboot
* during re-size.
*/
- ubi_scan_move_to_list(sv, seb, &si->erase);
- vol->eba_tbl[seb->lnum] = seb->pnum;
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
}
}
@@ -1283,7 +1384,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
if (ubi->avail_pebs < ubi->beb_rsvd_level) {
/* No enough free physical eraseblocks */
ubi->beb_rsvd_pebs = ubi->avail_pebs;
- print_rsvd_warning(ubi, si);
+ print_rsvd_warning(ubi, ai);
} else
ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
new file mode 100644
index 000000000000..154275182b4b
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1537 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+#include <linux/crc32.h>
+#include "ubi.h"
+
+/**
+ * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
+ * @ubi: UBI device description object
+ */
+size_t ubi_calc_fm_size(struct ubi_device *ubi)
+{
+ size_t size;
+
+ size = sizeof(struct ubi_fm_hdr) + \
+ sizeof(struct ubi_fm_scan_pool) + \
+ sizeof(struct ubi_fm_scan_pool) + \
+ (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
+ (sizeof(struct ubi_fm_eba) + \
+ (ubi->peb_count * sizeof(__be32))) + \
+ sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ return roundup(size, ubi->leb_size);
+}
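[Editorial aside] For orientation, a hypothetical sizing example; the struct sizes and flash geometry below are assumptions, not taken from this patch:

	/* Assume 1024 PEBs, sizeof(struct ubi_fm_ec) == 8, sizeof(__be32) == 4,
	 * and leb_size == 126976 (128 KiB PEB minus two 2 KiB header pages).
	 * The per-PEB terms contribute 1024 * 8 + 1024 * 4 = 12288 bytes;
	 * together with the fixed headers and UBI_MAX_VOLUMES volume headers
	 * this still fits in one LEB, so roundup() returns a single leb_size. */
	size_t fm_size = ubi_calc_fm_size(ubi);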
+
+
+/**
+ * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * @ubi: UBI device description object
+ * @vol_id: the VID of the new header
+ *
+ * Returns a new struct ubi_vid_hdr on success.
+ * NULL indicates out of memory.
+ */
+static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+{
+ struct ubi_vid_hdr *new;
+
+ new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!new)
+ goto out;
+
+ new->vol_type = UBI_VID_DYNAMIC;
+ new->vol_id = cpu_to_be32(vol_id);
+
+ /* UBI implementations without fastmap support have to delete the
+ * fastmap.
+ */
+ new->compat = UBI_COMPAT_DELETE;
+
+out:
+ return new;
+}
+
+/**
+ * add_aeb - create and add an attach erase block to a given list.
+ * @ai: UBI attach info object
+ * @list: the target list
+ * @pnum: PEB number of the new attach erase block
+ * @ec: erase counter of the new PEB
+ * @scrub: scrub this PEB after attaching
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+ int pnum, int ec, int scrub)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ aeb->lnum = -1;
+ aeb->scrub = scrub;
+ aeb->copy_flag = aeb->sqnum = 0;
+
+ ai->ec_sum += aeb->ec;
+ ai->ec_count++;
+
+ if (ai->max_ec < aeb->ec)
+ ai->max_ec = aeb->ec;
+
+ if (ai->min_ec > aeb->ec)
+ ai->min_ec = aeb->ec;
+
+ list_add_tail(&aeb->u.list, list);
+
+ return 0;
+}
+
+/**
+ * add_vol - create and add a new volume to ubi_attach_info.
+ * @ai: ubi_attach_info object
+ * @vol_id: VID of the new volume
+ * @used_ebs: number of used EBs
+ * @data_pad: data padding value of the new volume
+ * @vol_type: volume type
+ * @last_eb_bytes: number of bytes in the last LEB
+ *
+ * Returns the new struct ubi_ainf_volume on success.
+ * NULL indicates an error.
+ */
+static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
+ int used_ebs, int data_pad, u8 vol_type,
+ int last_eb_bytes)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+ else if (vol_id < av->vol_id)
+ p = &(*p)->rb_right;
+ }
+
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
+ goto out;
+
+ av->highest_lnum = av->leb_count = 0;
+ av->vol_id = vol_id;
+ av->used_ebs = used_ebs;
+ av->data_pad = data_pad;
+ av->last_data_size = last_eb_bytes;
+ av->compat = 0;
+ av->vol_type = vol_type;
+ av->root = RB_ROOT;
+
+ dbg_bld("found volume (ID %i)", vol_id);
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+
+out:
+ return av;
+}
+
+/**
+ * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
+ * from its original list.
+ * @ai: ubi_attach_info object
+ * @aeb: the AEB to be assigned
+ * @av: target attach volume
+ */
+static void assign_aeb_to_av(struct ubi_attach_info *ai,
+ struct ubi_ainf_peb *aeb,
+ struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *tmp_aeb;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ p = &av->root.rb_node;
+ while (*p) {
+ parent = *p;
+
+ tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (aeb->lnum != tmp_aeb->lnum) {
+ if (aeb->lnum < tmp_aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ } else
+ break;
+ }
+
+ list_del(&aeb->u.list);
+ av->leb_count++;
+
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
+}
+
+/**
+ * update_vol - inserts or updates a LEB which was found in a pool.
+ * @ubi: the UBI device object
+ * @ai: attach info object
+ * @av: the volume this LEB belongs to
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
+ struct ubi_ainf_peb *aeb, *victim;
+ int cmp_res;
+
+ while (*p) {
+ parent = *p;
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+
+ if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
+ if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ }
+
+ /* This case can happen if the fastmap gets written
+ * because of a volume change (creation, deletion, ..).
+ * Then a PEB can be within the persistent EBA and the pool.
+ */
+ if (aeb->pnum == new_aeb->pnum) {
+ ubi_assert(aeb->lnum == new_aeb->lnum);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ return 0;
+ }
+
+ cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
+ if (cmp_res < 0)
+ return cmp_res;
+
+ /* new_aeb is newer */
+ if (cmp_res & 1) {
+ victim = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!victim)
+ return -ENOMEM;
+
+ victim->ec = aeb->ec;
+ victim->pnum = aeb->pnum;
+ list_add_tail(&victim->u.list, &ai->erase);
+
+ if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
+ av->last_data_size = \
+ be32_to_cpu(new_vh->data_size);
+
+ dbg_bld("vol %i: AEB %i's PEB %i is the newer",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+
+ aeb->ec = new_aeb->ec;
+ aeb->pnum = new_aeb->pnum;
+ aeb->copy_flag = new_vh->copy_flag;
+ aeb->scrub = new_aeb->scrub;
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ /* new_aeb is older */
+ } else {
+ dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+ list_add_tail(&new_aeb->u.list, &ai->erase);
+ }
+
+ return 0;
+ }
+ /* This LEB is new, let's add it to the volume */
+
+ if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
+ av->highest_lnum = be32_to_cpu(new_vh->lnum);
+ av->last_data_size = be32_to_cpu(new_vh->data_size);
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av->used_ebs = be32_to_cpu(new_vh->used_ebs);
+
+ av->leb_count++;
+
+ rb_link_node(&new_aeb->u.rb, parent, p);
+ rb_insert_color(&new_aeb->u.rb, &av->root);
+
+ return 0;
+}
+
+/**
+ * process_pool_aeb - handle a non-empty PEB found in a pool.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct ubi_ainf_volume *av, *tmp_av = NULL;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ int found = 0;
+
+ if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
+ be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ return 0;
+ }
+
+ /* Find the volume this SEB belongs to */
+ while (*p) {
+ parent = *p;
+ tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
+ p = &(*p)->rb_left;
+ else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
+ p = &(*p)->rb_right;
+ else {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ av = tmp_av;
+ else {
+ ubi_err("orphaned volume in fastmap pool!");
+ return UBI_BAD_FASTMAP;
+ }
+
+ ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
+
+ return update_vol(ubi, ai, av, new_vh, new_aeb);
+}
+
+/**
+ * unmap_peb - unmap a PEB.
+ * If fastmap detects a free PEB in the pool it has to check whether
+ * this PEB has been unmapped after writing the fastmap.
+ *
+ * @ai: UBI attach info object
+ * @pnum: The PEB to be unmapped
+ */
+static void unmap_peb(struct ubi_attach_info *ai, int pnum)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node *node, *node2;
+ struct ubi_ainf_peb *aeb;
+
+ for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
+ av = rb_entry(node, struct ubi_ainf_volume, rb);
+
+ for (node2 = rb_first(&av->root); node2;
+ node2 = rb_next(node2)) {
+ aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
+ if (aeb->pnum == pnum) {
+ rb_erase(&aeb->u.rb, &av->root);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * scan_pool - scans a pool for changed (no longer empty) PEBs.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @pebs: an array of all PEB numbers in the pool to be scanned
+ * @pool_size: size of the pool (number of entries in @pebs)
+ * @max_sqnum: pointer to the maximal sequence number
+ * @eba_orphans: list of PEBs which need to be scanned
+ * @free: list of PEBs which are most likely free (and go into @ai->free)
+ *
+ * Returns 0 on success; UBI_BAD_FASTMAP is returned if the pool is unusable.
+ * < 0 indicates an internal error.
+ */
+static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int *pebs, int pool_size, unsigned long long *max_sqnum,
+ struct list_head *eba_orphans, struct list_head *free)
+{
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_ainf_peb *new_aeb, *tmp_aeb;
+ int i, pnum, err, found_orphan, ret = 0;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ return -ENOMEM;
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ kfree(ech);
+ return -ENOMEM;
+ }
+
+ dbg_bld("scanning fastmap pool: size = %i", pool_size);
+
+ /*
+ * Now scan all PEBs in the pool to find changes which have been made
+ * after the creation of the fastmap
+ */
+ for (i = 0; i < pool_size; i++) {
+ int scrub = 0;
+
+ pnum = be32_to_cpu(pebs[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ubi_err("bad PEB in fastmap pool!");
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read EC header! PEB:%i err:%i",
+ pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ } else if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+ ubi_err("bad image seq: 0x%x, expected: 0x%x",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
+ unsigned long long ec = be64_to_cpu(ech->ec);
+ unmap_peb(ai, pnum);
+ dbg_bld("Adding PEB to free: %i", pnum);
+ if (err == UBI_IO_FF_BITFLIPS)
+ add_aeb(ai, free, pnum, ec, 1);
+ else
+ add_aeb(ai, free, pnum, ec, 0);
+ continue;
+ } else if (err == 0 || err == UBI_IO_BITFLIPS) {
+ dbg_bld("Found non empty PEB:%i in pool", pnum);
+
+ if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ found_orphan = 0;
+ list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
+ if (tmp_aeb->pnum == pnum) {
+ found_orphan = 1;
+ break;
+ }
+ }
+ if (found_orphan) {
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ list_del(&tmp_aeb->u.list);
+ }
+
+ new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!new_aeb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ new_aeb->ec = be64_to_cpu(ech->ec);
+ new_aeb->pnum = pnum;
+ new_aeb->lnum = be32_to_cpu(vh->lnum);
+ new_aeb->sqnum = be64_to_cpu(vh->sqnum);
+ new_aeb->copy_flag = vh->copy_flag;
+ new_aeb->scrub = scrub;
+
+ if (*max_sqnum < new_aeb->sqnum)
+ *max_sqnum = new_aeb->sqnum;
+
+ err = process_pool_aeb(ubi, ai, vh, new_aeb);
+ if (err) {
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+ } else {
+ /* We are paranoid and fall back to scanning mode */
+ ubi_err("fastmap pool PEBs contains damaged PEBs!");
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+
+ }
+
+out:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+ return ret;
+}
+
+/**
+ * count_fastmap_pebs - Counts the PEBs found by fastmap.
+ * @ai: The UBI attach info object
+ */
+static int count_fastmap_pebs(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb1, *rb2;
+ int n = 0;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ n++;
+
+ list_for_each_entry(aeb, &ai->free, u.list)
+ n++;
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ n++;
+
+ return n;
+}
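[Editorial aside] count_fastmap_pebs() exists to back the WARN_ON() consistency check near the end of ubi_attach_fastmap(): every PEB must be accounted for either by the attach info or by the fastmap blocks themselves. Worked through with hypothetical numbers:

	/* Hypothetical accounting for the check further down:
	 *   ubi->peb_count = 1024, ai->bad_peb_count = 4, fm->used_blocks = 2
	 *   => count_fastmap_pebs(ai) must equal 1024 - 4 - 2 = 1018,
	 * otherwise attaching falls back to a full scan. */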
+
+/**
+ * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info object
+ * @fm: the fastmap to be attached
+ *
+ * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
+ * < 0 indicates an internal error.
+ */
+static int ubi_attach_fastmap(struct ubi_device *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_fastmap_layout *fm)
+{
+ struct list_head used, eba_orphans, free;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmhdr;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fmec;
+ struct ubi_fm_volhdr *fmvhdr;
+ struct ubi_fm_eba *fm_eba;
+ int ret, i, j, pool_size, wl_pool_size;
+ size_t fm_pos = 0, fm_size = ubi->fm_size;
+ unsigned long long max_sqnum = 0;
+ void *fm_raw = ubi->fm_buf;
+
+ INIT_LIST_HEAD(&used);
+ INIT_LIST_HEAD(&free);
+ INIT_LIST_HEAD(&eba_orphans);
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+ ai->min_ec = UBI_MAX_ERASECOUNTER;
+
+ ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fmsb = (struct ubi_fm_sb *)(fm_raw);
+ ai->max_sqnum = fmsb->sqnum;
+ fm_pos += sizeof(struct ubi_fm_sb);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
+ ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ pool_size = be16_to_cpu(fmpl1->size);
+ wl_pool_size = be16_to_cpu(fmpl2->size);
+ fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
+ fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
+
+ if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
+ ubi_err("bad pool size: %i", pool_size);
+ goto fail_bad;
+ }
+
+ if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
+ ubi_err("bad WL pool size: %i", wl_pool_size);
+ goto fail_bad;
+ }
+
+
+ if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_pool_size < 0) {
+ ubi_err("bad maximal pool size: %i", fm->max_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_wl_pool_size < 0) {
+ ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
+ goto fail_bad;
+ }
+
+ /* read EC values from free list */
+ for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from used list */
+ for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from scrub list */
+ for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ /* read EC values from erase list */
+ for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+ ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
+
+ /* Iterate over all volumes and read their EBA table */
+ for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
+ fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmvhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
+ ubi_err("bad fastmap vol header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
+ goto fail_bad;
+ }
+
+ av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
+ be32_to_cpu(fmvhdr->used_ebs),
+ be32_to_cpu(fmvhdr->data_pad),
+ fmvhdr->vol_type,
+ be32_to_cpu(fmvhdr->last_eb_bytes));
+
+ if (!av)
+ goto fail_bad;
+
+ ai->vols_found++;
+ if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
+ ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
+
+ fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fm_eba);
+ fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
+ ubi_err("bad fastmap EBA header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
+ goto fail_bad;
+ }
+
+ for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
+ int pnum = be32_to_cpu(fm_eba->pnum[j]);
+
+ if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
+ continue;
+
+ aeb = NULL;
+ list_for_each_entry(tmp_aeb, &used, u.list) {
+ if (tmp_aeb->pnum == pnum) {
+ aeb = tmp_aeb;
+ break;
+ }
+ }
+
+ /* This can happen if a PEB is already in an EBA known
+ * by this fastmap but the PEB itself is not in the used
+ * list.
+ * In this case the PEB is either in one of the fastmap
+ * pools or it was in the protection queue while the
+ * fastmap was being written.
+ */
+ if (!aeb) {
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!aeb) {
+ ret = -ENOMEM;
+
+ goto fail;
+ }
+
+ aeb->lnum = j;
+ aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
+ aeb->ec = -1;
+ aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
+ list_add_tail(&aeb->u.list, &eba_orphans);
+ continue;
+ }
+
+ aeb->lnum = j;
+
+ if (av->highest_lnum <= aeb->lnum)
+ av->highest_lnum = aeb->lnum;
+
+ assign_aeb_to_av(ai, aeb, av);
+
+ dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
+ aeb->pnum, aeb->lnum, av->vol_id);
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
+ u.list) {
+ int err;
+
+ if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
+ ubi_err("bad PEB in fastmap EBA orphan list");
+ ret = UBI_BAD_FASTMAP;
+ kfree(ech);
+ goto fail;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read EC header! PEB:%i " \
+ "err:%i", tmp_aeb->pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ kfree(ech);
+
+ goto fail;
+ } else if (err == UBI_IO_BITFLIPS)
+ tmp_aeb->scrub = 1;
+
+ tmp_aeb->ec = be64_to_cpu(ech->ec);
+ assign_aeb_to_av(ai, tmp_aeb, av);
+ }
+
+ kfree(ech);
+ }
+
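+ /*
+ * PEBs sitting in the two fastmap pools were handed out after the
+ * fastmap was written, so their contents are not covered by it and
+ * they have to be scanned individually.
+ */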
+ ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
+ &eba_orphans, &free);
+ if (ret)
+ goto fail;
+
+ ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
+ &eba_orphans, &free);
+ if (ret)
+ goto fail;
+
+ if (max_sqnum > ai->max_sqnum)
+ ai->max_sqnum = max_sqnum;
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->free);
+
+ /*
+ * If fastmap is leaking PEBs (must not happen), raise a
+ * fat warning and fall back to scanning mode.
+ * We do this here because in ubi_wl_init() it's too late
+ * and we cannot fall back to scanning.
+ */
+ if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
+ ai->bad_peb_count - fm->used_blocks))
+ goto fail_bad;
+
+ return 0;
+
+fail_bad:
+ ret = UBI_BAD_FASTMAP;
+fail:
+ return ret;
+}
+
+/**
+ * ubi_scan_fastmap - scan the fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info to be filled
+ * @fm_anchor: The fastmap starts at this PEB
+ *
+ * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
+ * UBI_BAD_FASTMAP if one was found but is not usable.
+ * < 0 indicates an internal error.
+ */
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor)
+{
+ struct ubi_fm_sb *fmsb, *fmsb2;
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fastmap_layout *fm;
+ int i, used_blocks, pnum, ret = 0;
+ size_t fm_size;
+ __be32 crc, tmp_crc;
+ unsigned long long sqnum = 0;
+
+ mutex_lock(&ubi->fm_mutex);
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
+ if (!fmsb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ if (!fm) {
+ ret = -ENOMEM;
+ kfree(fmsb);
+ goto out;
+ }
+
+ ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
+ if (ret && ret != UBI_IO_BITFLIPS)
+ goto free_fm_sb;
+ else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[0] = 1;
+
+ if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
+ ubi_err("bad super block magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ if (fmsb->version != UBI_FM_FMT_VERSION) {
+ ubi_err("bad fastmap version: %i, expected: %i",
+ fmsb->version, UBI_FM_FMT_VERSION);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ used_blocks = be32_to_cpu(fmsb->used_blocks);
+ if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
+ ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ fm_size = ubi->leb_size * used_blocks;
+ if (fm_size != ubi->fm_size) {
+ ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
+ ubi->fm_size);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto free_fm_sb;
+ }
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ pnum = be32_to_cpu(fmsb->block_loc[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
+ i, pnum);
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ } else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[i] = 1;
+
+ if (!ubi->image_seq)
+ ubi->image_seq = be32_to_cpu(ech->image_seq);
+
+ if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i (PEB: %i)",
+ i, pnum);
+ goto free_hdr;
+ }
+
+ if (i == 0) {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
+ ubi_err("bad fastmap anchor vol_id: 0x%x," \
+ " expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_SB_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ } else {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
+ ubi_err("bad fastmap data vol_id: 0x%x," \
+ " expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_DATA_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ }
+
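+ /* remember the highest sqnum of all fastmap blocks */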
+ if (sqnum < be64_to_cpu(vh->sqnum))
+ sqnum = be64_to_cpu(vh->sqnum);
+
+ ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
+ ubi->leb_start, ubi->leb_size);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i (PEB: %i, " \
+ "err: %i)", i, pnum, ret);
+ goto free_hdr;
+ }
+ }
+
+ kfree(fmsb);
+ fmsb = NULL;
+
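+ /* verify the CRC over the whole fastmap, with the data_crc field zeroed */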
+ fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
+ tmp_crc = be32_to_cpu(fmsb2->data_crc);
+ fmsb2->data_crc = 0;
+ crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
+ if (crc != tmp_crc) {
+ ubi_err("fastmap data CRC is invalid");
+ ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ fmsb2->sqnum = sqnum;
+
+ fm->used_blocks = used_blocks;
+
+ ret = ubi_attach_fastmap(ubi, ai, fm);
+ if (ret) {
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ struct ubi_wl_entry *e;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e) {
+ while (i--)
+ kfree(fm->e[i]);
+
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
+ e->ec = be32_to_cpu(fmsb2->block_ec[i]);
+ fm->e[i] = e;
+ }
+
+ ubi->fm = fm;
+ ubi->fm_pool.max_size = ubi->fm->max_pool_size;
+ ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
+ ubi_msg("attached by fastmap");
+ ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+ ubi->fm_disabled = 0;
+
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+out:
+ mutex_unlock(&ubi->fm_mutex);
+ if (ret == UBI_BAD_FASTMAP)
+ ubi_err("Attach by fastmap failed, doing a full scan!");
+ return ret;
+
+free_hdr:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+free_fm_sb:
+ kfree(fmsb);
+ kfree(fm);
+ goto out;
+}
+
+/**
+ * ubi_write_fastmap - writes a fastmap.
+ * @ubi: UBI device object
+ * @new_fm: the fastmap to be written
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int ubi_write_fastmap(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *new_fm)
+{
+ size_t fm_pos = 0;
+ void *fm_raw;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmh;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fec;
+ struct ubi_fm_volhdr *fvh;
+ struct ubi_fm_eba *feba;
+ struct rb_node *node;
+ struct ubi_wl_entry *wl_e;
+ struct ubi_volume *vol;
+ struct ubi_vid_hdr *avhdr, *dvhdr;
+ struct ubi_work *ubi_wrk;
+ int ret, i, j, free_peb_count, used_peb_count, vol_count;
+ int scrub_peb_count, erase_peb_count;
+
+ fm_raw = ubi->fm_buf;
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!avhdr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
+ if (!dvhdr) {
+ ret = -ENOMEM;
+ goto out_kfree;
+ }
+
+ spin_lock(&ubi->volumes_lock);
+ spin_lock(&ubi->wl_lock);
+
+ fmsb = (struct ubi_fm_sb *)fm_raw;
+ fm_pos += sizeof(*fmsb);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
+ fmsb->version = UBI_FM_FMT_VERSION;
+ fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
+ /* the max sqnum will be filled in while *reading* the fastmap */
+ fmsb->sqnum = 0;
+
+ fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
+ free_peb_count = 0;
+ used_peb_count = 0;
+ scrub_peb_count = 0;
+ erase_peb_count = 0;
+ vol_count = 0;
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
+ fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
+
+ for (i = 0; i < ubi->fm_pool.size; i++)
+ fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
+ fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
+
+ for (i = 0; i < ubi->fm_wl_pool.size; i++)
+ fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+
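+ /* record pnum and EC of all PEBs known to the WL sub-system */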
+ for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ free_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->free_peb_count = cpu_to_be32(free_peb_count);
+
+ for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->used_peb_count = cpu_to_be32(used_peb_count);
+
+ for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+ wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ scrub_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
+
+
+ list_for_each_entry(ubi_wrk, &ubi->works, list) {
+ if (ubi_is_erase_work(ubi_wrk)) {
+ wl_e = ubi_wrk->e;
+ ubi_assert(wl_e);
+
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ erase_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ }
+ fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
+
+ for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
+ vol = ubi->volumes[i];
+
+ if (!vol)
+ continue;
+
+ vol_count++;
+
+ fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fvh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
+ fvh->vol_id = cpu_to_be32(vol->vol_id);
+ fvh->vol_type = vol->vol_type;
+ fvh->used_ebs = cpu_to_be32(vol->used_ebs);
+ fvh->data_pad = cpu_to_be32(vol->data_pad);
+ fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
+
+ ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
+ vol->vol_type == UBI_STATIC_VOLUME);
+
+ feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
+
+ feba->reserved_pebs = cpu_to_be32(j);
+ feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
+ }
+ fmh->vol_count = cpu_to_be32(vol_count);
+ fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
+
+ avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ avhdr->lnum = 0;
+
+ spin_unlock(&ubi->wl_lock);
+ spin_unlock(&ubi->volumes_lock);
+
+ dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
+ if (ret) {
+ ubi_err("unable to write vid_hdr to fastmap SB!");
+ goto out_kfree;
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
+ fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
+ }
+
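+ /* the CRC is computed with the data_crc field itself set to zero */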
+ fmsb->data_crc = 0;
+ fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
+ ubi->fm_size));
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ dvhdr->lnum = cpu_to_be32(i);
+ dbg_bld("writing fastmap data to PEB %i sqnum %llu",
+ new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
+ if (ret) {
+ ubi_err("unable to write vid_hdr to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
+ new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
+ if (ret) {
+ ubi_err("unable to write fastmap to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ ubi_assert(new_fm);
+ ubi->fm = new_fm;
+
+ dbg_bld("fastmap written!");
+
+out_kfree:
+ ubi_free_vid_hdr(ubi, avhdr);
+ ubi_free_vid_hdr(ubi, dvhdr);
+out:
+ return ret;
+}
+
+/**
+ * erase_block - Manually erase a PEB.
+ * @ubi: UBI device object
+ * @pnum: PEB to be erased
+ *
+ * Returns the new EC value on success, < 0 indicates an internal error.
+ */
+static int erase_block(struct ubi_device *ubi, int pnum)
+{
+ int ret;
+ struct ubi_ec_hdr *ec_hdr;
+ long long ec;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+ if (ret < 0)
+ goto out;
+ else if (ret && ret != UBI_IO_BITFLIPS) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ubi_io_sync_erase(ubi, pnum, 0);
+ if (ret < 0)
+ goto out;
+
+ ec = be64_to_cpu(ec_hdr->ec);
+ ec += ret;
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ec_hdr->ec = cpu_to_be64(ec);
+ ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+ if (ret < 0)
+ goto out;
+
+ ret = ec;
+out:
+ kfree(ec_hdr);
+ return ret;
+}
+
+/**
+ * invalidate_fastmap - destroys a fastmap.
+ * @ubi: UBI device object
+ * @fm: the fastmap to be destroyed
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int invalidate_fastmap(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *fm)
+{
+ int ret, i;
+ struct ubi_vid_hdr *vh;
+
+ ret = erase_block(ubi, fm->e[0]->pnum);
+ if (ret < 0)
+ return ret;
+
+ vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!vh)
+ return -ENOMEM;
+
+ /*
+ * Deleting the current fastmap SB is not enough, an old SB may exist,
+ * so create a (corrupted) SB such that fastmap will find it and fall
+ * back to scanning mode in any case.
+ */
+ vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
+
+ for (i = 0; i < fm->used_blocks; i++)
+ ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
+
+ return ret;
+}
+
+/**
+ * ubi_update_fastmap - will be called by UBI if a volume changes or
+ * a fastmap pool becomes full.
+ * @ubi: UBI device object
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+int ubi_update_fastmap(struct ubi_device *ubi)
+{
+ int ret, i;
+ struct ubi_fastmap_layout *new_fm, *old_fm;
+ struct ubi_wl_entry *tmp_e;
+
+ mutex_lock(&ubi->fm_mutex);
+
+ ubi_refill_pools(ubi);
+
+ if (ubi->ro_mode || ubi->fm_disabled) {
+ mutex_unlock(&ubi->fm_mutex);
+ return 0;
+ }
+
+ ret = ubi_ensure_anchor_pebs(ubi);
+ if (ret) {
+ mutex_unlock(&ubi->fm_mutex);
+ return ret;
+ }
+
+ new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ if (!new_fm) {
+ mutex_unlock(&ubi->fm_mutex);
+ return -ENOMEM;
+ }
+
+ new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!new_fm->e[i]) {
+ while (i--)
+ kfree(new_fm->e[i]);
+
+ kfree(new_fm);
+ mutex_unlock(&ubi->fm_mutex);
+ return -ENOMEM;
+ }
+ }
+
+ old_fm = ubi->fm;
+ ubi->fm = NULL;
+
+ if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
+ ubi_err("fastmap too large");
+ ret = -ENOSPC;
+ goto err;
+ }
+
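+ /*
+ * Pick PEBs for the fastmap data blocks: prefer fresh free PEBs,
+ * otherwise re-erase and reuse the PEBs of the old fastmap.
+ */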
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi_wl_get_fm_peb(ubi, 0);
+ spin_unlock(&ubi->wl_lock);
+
+ if (!tmp_e && !old_fm) {
+ int j;
+ ubi_err("could not get any free erase block");
+
+ for (j = 1; j < i; j++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+
+ ret = -ENOSPC;
+ goto err;
+ } else if (!tmp_e && old_fm) {
+ ret = erase_block(ubi, old_fm->e[i]->pnum);
+ if (ret < 0) {
+ int j;
+
+ for (j = 1; j < i; j++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j],
+ j, 0);
+
+ ubi_err("could not erase old fastmap PEB");
+ goto err;
+ }
+
+ new_fm->e[i]->pnum = old_fm->e[i]->pnum;
+ new_fm->e[i]->ec = old_fm->e[i]->ec;
+ } else {
+ new_fm->e[i]->pnum = tmp_e->pnum;
+ new_fm->e[i]->ec = tmp_e->ec;
+
+ if (old_fm)
+ ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+ old_fm->to_be_tortured[i]);
+ }
+ }
+
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+ spin_unlock(&ubi->wl_lock);
+
+ if (old_fm) {
+ /* no fresh anchor PEB was found, reuse the old one */
+ if (!tmp_e) {
+ ret = erase_block(ubi, old_fm->e[0]->pnum);
+ if (ret < 0) {
+ int i;
+ ubi_err("could not erase old anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i],
+ i, 0);
+ goto err;
+ }
+
+ new_fm->e[0]->pnum = old_fm->e[0]->pnum;
+ new_fm->e[0]->ec = ret;
+ } else {
+ /* we've got a new anchor PEB, return the old one */
+ ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
+ old_fm->to_be_tortured[0]);
+
+ new_fm->e[0]->pnum = tmp_e->pnum;
+ new_fm->e[0]->ec = tmp_e->ec;
+ }
+ } else {
+ if (!tmp_e) {
+ int i;
+ ubi_err("could not find any anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++)
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
+
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ new_fm->e[0]->pnum = tmp_e->pnum;
+ new_fm->e[0]->ec = tmp_e->ec;
+ }
+
+ down_write(&ubi->work_sem);
+ down_write(&ubi->fm_sem);
+ ret = ubi_write_fastmap(ubi, new_fm);
+ up_write(&ubi->fm_sem);
+ up_write(&ubi->work_sem);
+
+ if (ret)
+ goto err;
+
+out_unlock:
+ mutex_unlock(&ubi->fm_mutex);
+ kfree(old_fm);
+ return ret;
+
+err:
+ kfree(new_fm);
+
+ ubi_warn("Unable to write new fastmap, err=%i", ret);
+
+ ret = 0;
+ if (old_fm) {
+ ret = invalidate_fastmap(ubi, old_fm);
+ if (ret < 0)
+ ubi_err("Unable to invalidate current fastmap!");
+ else if (ret)
+ ret = 0;
+ }
+ goto out_unlock;
+}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 941bc3c05d6e..782f12d7756b 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -41,7 +41,7 @@
#include "ubi-media.h"
#define err_msg(fmt, ...) \
- printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \
+ pr_err("gluebi (pid %d): %s: " fmt "\n", \
current->pid, __func__, ##__VA_ARGS__)
/**
@@ -171,7 +171,7 @@ static void gluebi_put_device(struct mtd_info *mtd)
static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
- int err = 0, lnum, offs, total_read;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
if (len < 0 || from < 0 || from + len > mtd->size)
@@ -180,12 +180,12 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
gluebi = container_of(mtd, struct gluebi_device, mtd);
lnum = div_u64_rem(from, mtd->erasesize, &offs);
- total_read = len;
- while (total_read) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_read = mtd->erasesize - offs;
- if (to_read > total_read)
- to_read = total_read;
+ if (to_read > bytes_left)
+ to_read = bytes_left;
err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
if (err)
@@ -193,11 +193,11 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
lnum += 1;
offs = 0;
- total_read -= to_read;
+ bytes_left -= to_read;
buf += to_read;
}
- *retlen = len - total_read;
+ *retlen = len - bytes_left;
return err;
}
@@ -215,7 +215,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
- int err = 0, lnum, offs, total_written;
+ int err = 0, lnum, offs, bytes_left;
struct gluebi_device *gluebi;
if (len < 0 || to < 0 || len + to > mtd->size)
@@ -231,24 +231,24 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
if (len % mtd->writesize || offs % mtd->writesize)
return -EINVAL;
- total_written = len;
- while (total_written) {
+ bytes_left = len;
+ while (bytes_left) {
size_t to_write = mtd->erasesize - offs;
- if (to_write > total_written)
- to_write = total_written;
+ if (to_write > bytes_left)
+ to_write = bytes_left;
- err = ubi_write(gluebi->desc, lnum, buf, offs, to_write);
+ err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write);
if (err)
break;
lnum += 1;
offs = 0;
- total_written -= to_write;
+ bytes_left -= to_write;
buf += to_write;
}
- *retlen = len - total_written;
+ *retlen = len - bytes_left;
return err;
}
@@ -360,9 +360,8 @@ static int gluebi_create(struct ubi_device_info *di,
mutex_lock(&devices_mutex);
g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (g)
- err_msg("gluebi MTD device %d form UBI device %d volume %d "
- "already exists", g->mtd.index, vi->ubi_num,
- vi->vol_id);
+ err_msg("gluebi MTD device %d from UBI device %d volume %d already exists",
+ g->mtd.index, vi->ubi_num, vi->vol_id);
mutex_unlock(&devices_mutex);
if (mtd_device_register(mtd, NULL, 0)) {
@@ -395,8 +394,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
mutex_lock(&devices_mutex);
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
- err_msg("got remove notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got remove notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
err = -ENOENT;
} else if (gluebi->refcnt)
err = -EBUSY;
@@ -409,9 +408,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
mtd = &gluebi->mtd;
err = mtd_device_unregister(mtd);
if (err) {
- err_msg("cannot remove fake MTD device %d, UBI device %d, "
- "volume %d, error %d", mtd->index, gluebi->ubi_num,
- gluebi->vol_id, err);
+ err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d",
+ mtd->index, gluebi->ubi_num, gluebi->vol_id, err);
mutex_lock(&devices_mutex);
list_add_tail(&gluebi->list, &gluebi_devices);
mutex_unlock(&devices_mutex);
@@ -441,8 +439,8 @@ static int gluebi_updated(struct ubi_volume_info *vi)
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
mutex_unlock(&devices_mutex);
- err_msg("got update notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
return -ENOENT;
}
@@ -468,8 +466,8 @@ static int gluebi_resized(struct ubi_volume_info *vi)
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
mutex_unlock(&devices_mutex);
- err_msg("got update notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
return -ENOENT;
}
gluebi->mtd.size = vi->used_bytes;
@@ -526,9 +524,9 @@ static void __exit ubi_gluebi_exit(void)
err = mtd_device_unregister(mtd);
if (err)
- err_msg("error %d while removing gluebi MTD device %d, "
- "UBI device %d, volume %d - ignoring", err,
- mtd->index, gluebi->ubi_num, gluebi->vol_id);
+ err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring",
+ err, mtd->index, gluebi->ubi_num,
+ gluebi->vol_id);
kfree(mtd->name);
kfree(gluebi);
}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 6ba55c235873..545483018cde 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -91,21 +91,15 @@
#include <linux/slab.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_ec_hdr *ec_hdr);
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_vid_hdr *vid_hdr);
-#else
-#define paranoid_check_not_bad(ubi, pnum) 0
-#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
-#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0
-#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
-#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
-#endif
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr);
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr);
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len);
/**
* ubi_io_read - read data from a physical eraseblock.
@@ -142,7 +136,7 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
ubi_assert(len > 0);
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err)
return err;
@@ -183,22 +177,21 @@ retry:
* enabled. A corresponding message will be printed
* later, when it has been scrubbed.
*/
- dbg_msg("fixable bit-flip detected at PEB %d", pnum);
+ ubi_msg("fixable bit-flip detected at PEB %d", pnum);
ubi_assert(len == read);
return UBI_IO_BITFLIPS;
}
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error %d%s while reading %d bytes from PEB "
- "%d:%d, read only %zd bytes, retry",
- err, errstr, len, pnum, offset, read);
+ ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
+ err, errstr, len, pnum, offset, read);
yield();
goto retry;
}
- ubi_err("error %d%s while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, errstr, len, pnum, offset, read);
- ubi_dbg_dump_stack();
+ ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, errstr, len, pnum, offset, read);
+ dump_stack();
/*
* The driver should never return -EBADMSG if it failed to read
@@ -257,14 +250,12 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
return -EROFS;
}
- /* The below has to be compiled out if paranoid checks are disabled */
-
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err)
return err;
/* The area we are writing to has to contain all 0xFF bytes */
- err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
if (err)
return err;
@@ -273,33 +264,33 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
* We write to the data area of the physical eraseblock. Make
* sure it has valid EC and VID headers.
*/
- err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
- err = paranoid_check_peb_vid_hdr(ubi, pnum);
+ err = self_check_peb_vid_hdr(ubi, pnum);
if (err)
return err;
}
if (ubi_dbg_is_write_failure(ubi)) {
- dbg_err("cannot write %d bytes to PEB %d:%d "
- "(emulated)", len, pnum, offset);
- ubi_dbg_dump_stack();
+ ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
+ len, pnum, offset);
+ dump_stack();
return -EIO;
}
addr = (loff_t)pnum * ubi->peb_size + offset;
err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
if (err) {
- ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
- "%zd bytes", err, len, pnum, offset, written);
- ubi_dbg_dump_stack();
- ubi_dbg_dump_flash(ubi, pnum, offset, len);
+ ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+ err, len, pnum, offset, written);
+ dump_stack();
+ ubi_dump_flash(ubi, pnum, offset, len);
} else
ubi_assert(written == len);
if (!err) {
- err = ubi_dbg_check_write(ubi, buf, pnum, offset, len);
+ err = self_check_write(ubi, buf, pnum, offset, len);
if (err)
return err;
@@ -310,7 +301,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
offset += len;
len = ubi->peb_size - offset;
if (len)
- err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
}
return err;
@@ -364,13 +355,13 @@ retry:
err = ubi->mtd->erase(ubi->mtd, &ei);
if (err) {
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error %d while erasing PEB %d, retry",
- err, pnum);
+ ubi_warn("error %d while erasing PEB %d, retry",
+ err, pnum);
yield();
goto retry;
}
ubi_err("cannot erase PEB %d, error %d", pnum, err);
- ubi_dbg_dump_stack();
+ dump_stack();
return err;
}
@@ -383,21 +374,21 @@ retry:
if (ei.state == MTD_ERASE_FAILED) {
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error while erasing PEB %d, retry", pnum);
+ ubi_warn("error while erasing PEB %d, retry", pnum);
yield();
goto retry;
}
ubi_err("cannot erase PEB %d", pnum);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EIO;
}
- err = ubi_dbg_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
if (err)
return err;
if (ubi_dbg_is_erase_failure(ubi)) {
- dbg_err("cannot erase PEB %d (emulated)", pnum);
+ ubi_err("cannot erase PEB %d (emulated)", pnum);
return -EIO;
}
@@ -431,11 +422,11 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
goto out;
/* Make sure the PEB contains only 0xFF bytes */
- err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
- err = ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size);
+ err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
if (err == 0) {
ubi_err("erased PEB %d, but a non-0xFF byte found",
pnum);
@@ -444,17 +435,17 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
}
/* Write a pattern and check it */
- memset(ubi->peb_buf1, patterns[i], ubi->peb_size);
- err = ubi_io_write(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ memset(ubi->peb_buf, patterns[i], ubi->peb_size);
+ err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
- memset(ubi->peb_buf1, ~patterns[i], ubi->peb_size);
- err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
+ memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
if (err)
goto out;
- err = ubi_check_pattern(ubi->peb_buf1, patterns[i],
+ err = ubi_check_pattern(ubi->peb_buf, patterns[i],
ubi->peb_size);
if (err == 0) {
ubi_err("pattern %x checking failed for PEB %d",
@@ -521,8 +512,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
* It is important to first invalidate the EC header, and then the VID
* header. Otherwise a power cut may lead to valid EC header and
* invalid VID header, in which case UBI will treat this PEB as
- * corrupted and will try to preserve it, and print scary warnings (see
- * the header comment in scan.c for more information).
+ * corrupted and will try to preserve it, and print scary warnings.
*/
addr = (loff_t)pnum * ubi->peb_size;
err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
@@ -564,7 +554,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
*/
ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
pnum, err, err1);
- ubi_dbg_dump_flash(ubi, pnum, 0, ubi->peb_size);
+ ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
return -EIO;
}
@@ -590,7 +580,7 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err != 0)
return err;
@@ -695,8 +685,7 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
leb_start = be32_to_cpu(ec_hdr->data_offset);
if (ec_hdr->version != UBI_VERSION) {
- ubi_err("node with incompatible UBI version found: "
- "this UBI version is %d, image version is %d",
+ ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
UBI_VERSION, (int)ec_hdr->version);
goto bad;
}
@@ -722,8 +711,8 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
bad:
ubi_err("bad EC header");
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
return 1;
}
@@ -787,10 +776,10 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
/* The physical eraseblock is supposedly empty */
if (verbose)
- ubi_warn("no EC header found at PEB %d, "
- "only 0xFF bytes", pnum);
- dbg_bld("no EC header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -802,12 +791,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
* 0xFF bytes. Report that the header is corrupted.
*/
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
- ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
+ ubi_dump_ec_hdr(ec_hdr);
}
- dbg_bld("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -816,12 +805,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad EC header CRC at PEB %d, calculated "
- "%#08x, read %#08x", pnum, crc, hdr_crc);
- ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_ec_hdr(ec_hdr);
}
- dbg_bld("bad EC header CRC at PEB %d, calculated "
- "%#08x, read %#08x", pnum, crc, hdr_crc);
+ dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
@@ -875,7 +864,7 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
ec_hdr->hdr_crc = cpu_to_be32(crc);
- err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
if (err)
return err;
@@ -906,40 +895,40 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
int usable_leb_size = ubi->leb_size - data_pad;
if (copy_flag != 0 && copy_flag != 1) {
- dbg_err("bad copy_flag");
+ ubi_err("bad copy_flag");
goto bad;
}
if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
data_pad < 0) {
- dbg_err("negative values");
+ ubi_err("negative values");
goto bad;
}
if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
- dbg_err("bad vol_id");
+ ubi_err("bad vol_id");
goto bad;
}
if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
- dbg_err("bad compat");
+ ubi_err("bad compat");
goto bad;
}
if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
compat != UBI_COMPAT_REJECT) {
- dbg_err("bad compat");
+ ubi_err("bad compat");
goto bad;
}
if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
- dbg_err("bad vol_type");
+ ubi_err("bad vol_type");
goto bad;
}
if (data_pad >= ubi->leb_size / 2) {
- dbg_err("bad data_pad");
+ ubi_err("bad data_pad");
goto bad;
}
@@ -951,45 +940,45 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
* mapped logical eraseblocks.
*/
if (used_ebs == 0) {
- dbg_err("zero used_ebs");
+ ubi_err("zero used_ebs");
goto bad;
}
if (data_size == 0) {
- dbg_err("zero data_size");
+ ubi_err("zero data_size");
goto bad;
}
if (lnum < used_ebs - 1) {
if (data_size != usable_leb_size) {
- dbg_err("bad data_size");
+ ubi_err("bad data_size");
goto bad;
}
} else if (lnum == used_ebs - 1) {
if (data_size == 0) {
- dbg_err("bad data_size at last LEB");
+ ubi_err("bad data_size at last LEB");
goto bad;
}
} else {
- dbg_err("too high lnum");
+ ubi_err("too high lnum");
goto bad;
}
} else {
if (copy_flag == 0) {
if (data_crc != 0) {
- dbg_err("non-zero data CRC");
+ ubi_err("non-zero data CRC");
goto bad;
}
if (data_size != 0) {
- dbg_err("non-zero data_size");
+ ubi_err("non-zero data_size");
goto bad;
}
} else {
if (data_size == 0) {
- dbg_err("zero data_size of copy");
+ ubi_err("zero data_size of copy");
goto bad;
}
}
if (used_ebs != 0) {
- dbg_err("bad used_ebs");
+ ubi_err("bad used_ebs");
goto bad;
}
}
@@ -998,8 +987,8 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
bad:
ubi_err("bad VID header");
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
return 1;
}
@@ -1042,10 +1031,10 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
if (verbose)
- ubi_warn("no VID header found at PEB %d, "
- "only 0xFF bytes", pnum);
- dbg_bld("no VID header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -1053,12 +1042,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
}
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
+ ubi_dump_vid_hdr(vid_hdr);
}
- dbg_bld("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -1067,12 +1056,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_vid_hdr(vid_hdr);
}
- dbg_bld("bad CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
+ dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
else
@@ -1113,7 +1102,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
dbg_io("write VID header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
@@ -1122,7 +1111,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
vid_hdr->hdr_crc = cpu_to_be32(crc);
- err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
if (err)
return err;
@@ -1132,34 +1121,32 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
+ * self_check_not_bad - ensure that a physical eraseblock is not bad.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to check
*
* This function returns zero if the physical eraseblock is good, %-EINVAL if
* it is bad and a negative error code if an error occurred.
*/
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
err = ubi_io_is_bad(ubi, pnum);
if (!err)
return err;
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ dump_stack();
return err > 0 ? -EINVAL : err;
}
/**
- * paranoid_check_ec_hdr - check if an erase counter header is all right.
+ * self_check_ec_hdr - check if an erase counter header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the erase counter header belongs to
* @ec_hdr: the erase counter header to check
@@ -1167,13 +1154,13 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
* This function returns zero if the erase counter header contains valid
* values, and %-EINVAL if not.
*/
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_ec_hdr *ec_hdr)
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr)
{
int err;
uint32_t magic;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(ec_hdr->magic);
@@ -1185,33 +1172,33 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
err = validate_ec_hdr(ubi, ec_hdr);
if (err) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
goto fail;
}
return 0;
fail:
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_peb_ec_hdr - check erase counter header.
+ * self_check_peb_ec_hdr - check erase counter header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
* This function returns zero if the erase counter header is all right and
* a negative error code if not or if an error occurred.
*/
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1226,14 +1213,14 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
err = -EINVAL;
goto exit;
}
- err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
exit:
kfree(ec_hdr);
@@ -1241,7 +1228,7 @@ exit:
}
/**
- * paranoid_check_vid_hdr - check that a volume identifier header is all right.
+ * self_check_vid_hdr - check that a volume identifier header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the volume identifier header belongs to
* @vid_hdr: the volume identifier header to check
@@ -1249,13 +1236,13 @@ exit:
* This function returns zero if the volume identifier header is all right, and
* %-EINVAL if not.
*/
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_vid_hdr *vid_hdr)
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
{
int err;
uint32_t magic;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
magic = be32_to_cpu(vid_hdr->magic);
@@ -1267,36 +1254,36 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
err = validate_vid_hdr(ubi, vid_hdr);
if (err) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
goto fail;
}
return err;
fail:
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_peb_vid_hdr - check volume identifier header.
+ * self_check_peb_vid_hdr - check volume identifier header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
* This function returns zero if the volume identifier header is all right,
* and a negative error code if not or if an error occurred.
*/
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
struct ubi_vid_hdr *vid_hdr;
void *p;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -1312,16 +1299,16 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
- ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
err = -EINVAL;
goto exit;
}
- err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
exit:
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -1329,7 +1316,7 @@ exit:
}
/**
- * ubi_dbg_check_write - make sure write succeeded.
+ * self_check_write - make sure write succeeded.
* @ubi: UBI device description object
* @buf: buffer with data which were written
* @pnum: physical eraseblock number the data were written to
@@ -1340,15 +1327,15 @@ exit:
* the original data buffer - the data have to match. Returns zero if the data
* match and a negative error code if not or in case of failure.
*/
-int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
- int offset, int len)
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len)
{
int err, i;
size_t read;
void *buf1;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1369,7 +1356,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
if (c == c1)
continue;
- ubi_err("paranoid check failed for PEB %d:%d, len %d",
+ ubi_err("self-check failed for PEB %d:%d, len %d",
pnum, offset, len);
ubi_msg("data differ at position %d", i);
dump_len = max_t(int, 128, len - i);
@@ -1381,7 +1368,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
i, i + dump_len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
buf1 + i, dump_len, 1);
- ubi_dbg_dump_stack();
+ dump_stack();
err = -EINVAL;
goto out_free;
}
@@ -1395,7 +1382,7 @@ out_free:
}
/**
- * ubi_dbg_check_all_ff - check that a region of flash is empty.
+ * ubi_self_check_all_ff - check that a region of flash is empty.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @offset: the starting offset within the physical eraseblock to check
@@ -1405,14 +1392,14 @@ out_free:
* @offset of the physical eraseblock @pnum, and a negative error code if not
* or if an error occurred.
*/
-int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
size_t read;
int err;
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!ubi->dbg->chk_io)
+ if (!ubi_dbg_chk_io(ubi))
return 0;
buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1423,15 +1410,15 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
if (err && err != -EUCLEAN) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offset, read);
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
goto error;
}
err = ubi_check_pattern(buf, 0xFF, len);
if (err == 0) {
- ubi_err("flash region at PEB %d:%d, length %d does not "
- "contain all 0xFF bytes", pnum, offset, len);
+ ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+ pnum, offset, len);
goto fail;
}
@@ -1439,14 +1426,12 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
return 0;
fail:
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
ubi_msg("hex dump of the %d-%d region", offset, offset + len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
err = -EINVAL;
error:
- ubi_dbg_dump_stack();
+ dump_stack();
vfree(buf);
return err;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index d39716e5b204..0a2b16b43b3f 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -221,7 +221,7 @@ out_free:
kfree(desc);
out_put_ubi:
ubi_put_device(ubi);
- dbg_err("cannot open device %d, volume %d, error %d",
+ ubi_err("cannot open device %d, volume %d, error %d",
ubi_num, vol_id, err);
return ERR_PTR(err);
}
@@ -426,11 +426,9 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
* @buf: data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
- * @dtype: expected data type
*
* This function writes @len bytes of data from @buf to offset @offset of
- * logical eraseblock @lnum. The @dtype argument describes expected lifetime of
- * the data.
+ * logical eraseblock @lnum.
*
* This function takes care of physical eraseblock write failures. If write to
* the physical eraseblock write operation fails, the logical eraseblock is
@@ -447,7 +445,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
* returns immediately with %-EBADF code.
*/
int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int offset, int len, int dtype)
+ int offset, int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -466,17 +464,13 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
- return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_write);
@@ -486,7 +480,6 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
* @lnum: logical eraseblock number to change
* @buf: data to write
* @len: how many bytes to write
- * @dtype: expected data type
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
@@ -497,7 +490,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
* code in case of failure.
*/
int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int len, int dtype)
+ int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -515,17 +508,13 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
- return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_change);
@@ -562,7 +551,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
if (err)
return err;
- return ubi_wl_flush(ubi);
+ return ubi_wl_flush(ubi, vol->vol_id, lnum);
}
EXPORT_SYMBOL_GPL(ubi_leb_erase);
@@ -626,7 +615,6 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
* ubi_leb_map - map logical eraseblock to a physical eraseblock.
* @desc: volume descriptor
* @lnum: logical eraseblock number
- * @dtype: expected data type
*
* This function maps an un-mapped logical eraseblock @lnum to a physical
* eraseblock. This means, that after a successful invocation of this
@@ -639,7 +627,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
* eraseblock is already mapped, and other negative error codes in case of
* other failures.
*/
-int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -652,17 +640,13 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
if (lnum < 0 || lnum >= vol->reserved_pebs)
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (vol->eba_tbl[lnum] >= 0)
return -EBADMSG;
- return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(ubi_leb_map);
@@ -722,6 +706,33 @@ int ubi_sync(int ubi_num)
}
EXPORT_SYMBOL_GPL(ubi_sync);
+/**
+ * ubi_flush - flush UBI work queue.
+ * @ubi_num: UBI device to flush work queue
+ * @vol_id: volume id to flush for
+ * @lnum: logical eraseblock number to flush for
+ *
+ * This function executes all pending work for a particular volume id / logical
+ * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
+ * a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_flush(int ubi_num, int vol_id, int lnum)
+{
+ struct ubi_device *ubi;
+ int err = 0;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ err = ubi_wl_flush(ubi, vol_id, lnum);
+ ubi_put_device(ubi);
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_flush);
+
BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
/**
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index ff2a65c37f69..554867dfd18b 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -92,16 +92,45 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
}
/**
- * ubi_calculate_rsvd_pool - calculate how many PEBs must be reserved for bad
+ * ubi_update_reserved - update bad eraseblock handling accounting data.
+ * @ubi: UBI device description object
+ *
+ * This function calculates the gap between the current number of PEBs reserved
+ * for bad eraseblock handling and the required reserve level and, if
+ * necessary, reserves more PEBs to close that gap, subject to availability.
+ * Should be called with ubi->volumes_lock held.
+ */
+void ubi_update_reserved(struct ubi_device *ubi)
+{
+ int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+
+ if (need <= 0 || ubi->avail_pebs == 0)
+ return;
+
+ need = min_t(int, need, ubi->avail_pebs);
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ ubi_msg("reserved more %d PEBs for bad PEB handling", need);
+}
+
+/**
+ * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
* eraseblock handling.
* @ubi: UBI device description object
*/
void ubi_calculate_reserved(struct ubi_device *ubi)
{
- ubi->beb_rsvd_level = ubi->good_peb_count/100;
- ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE;
- if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS)
- ubi->beb_rsvd_level = MIN_RESEVED_PEBS;
+ /*
+ * Calculate the actual number of PEBs currently needed to be reserved
+ * for future bad eraseblock handling.
+ */
+ ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
+ if (ubi->beb_rsvd_level < 0) {
+ ubi->beb_rsvd_level = 0;
+ ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+ ubi->bad_peb_count, ubi->bad_peb_limit);
+ }
}
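To make the new accounting concrete (hypothetical numbers, for illustration only): on a device with bad_peb_limit = 20 and bad_peb_count = 3, the function above sets beb_rsvd_level = 17; if beb_rsvd_pebs is currently 10 and only 4 PEBs are available, the ubi_update_reserved() helper added earlier in this file computes need = min(17 - 10, 4) = 4 and moves those 4 PEBs from @avail_pebs into the bad-PEB reserve. Once bad_peb_count exceeds the limit, the level is clamped to 0 and the warning is emitted instead.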
/**
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
deleted file mode 100644
index d48aef15ab5d..000000000000
--- a/drivers/mtd/ubi/scan.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) International Business Machines Corp., 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Author: Artem Bityutskiy (Битюцкий Артём)
- */
-
-#ifndef __UBI_SCAN_H__
-#define __UBI_SCAN_H__
-
-/* The erase counter value for this physical eraseblock is unknown */
-#define UBI_SCAN_UNKNOWN_EC (-1)
-
-/**
- * struct ubi_scan_leb - scanning information about a physical eraseblock.
- * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown)
- * @pnum: physical eraseblock number
- * @lnum: logical eraseblock number
- * @scrub: if this physical eraseblock needs scrubbing
- * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
- * @sqnum: sequence number
- * @u: unions RB-tree or @list links
- * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects
- * @u.list: link in one of the eraseblock lists
- *
- * One object of this type is allocated for each physical eraseblock during
- * scanning.
- */
-struct ubi_scan_leb {
- int ec;
- int pnum;
- int lnum;
- unsigned int scrub:1;
- unsigned int copy_flag:1;
- unsigned long long sqnum;
- union {
- struct rb_node rb;
- struct list_head list;
- } u;
-};
-
-/**
- * struct ubi_scan_volume - scanning information about a volume.
- * @vol_id: volume ID
- * @highest_lnum: highest logical eraseblock number in this volume
- * @leb_count: number of logical eraseblocks in this volume
- * @vol_type: volume type
- * @used_ebs: number of used logical eraseblocks in this volume (only for
- * static volumes)
- * @last_data_size: amount of data in the last logical eraseblock of this
- * volume (always equivalent to the usable logical eraseblock
- * size in case of dynamic volumes)
- * @data_pad: how many bytes at the end of logical eraseblocks of this volume
- * are not used (due to volume alignment)
- * @compat: compatibility flags of this volume
- * @rb: link in the volume RB-tree
- * @root: root of the RB-tree containing all the eraseblock belonging to this
- * volume (&struct ubi_scan_leb objects)
- *
- * One object of this type is allocated for each volume during scanning.
- */
-struct ubi_scan_volume {
- int vol_id;
- int highest_lnum;
- int leb_count;
- int vol_type;
- int used_ebs;
- int last_data_size;
- int data_pad;
- int compat;
- struct rb_node rb;
- struct rb_root root;
-};
-
-/**
- * struct ubi_scan_info - UBI scanning information.
- * @volumes: root of the volume RB-tree
- * @corr: list of corrupted physical eraseblocks
- * @free: list of free physical eraseblocks
- * @erase: list of physical eraseblocks which have to be erased
- * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
- * those belonging to "preserve"-compatible internal volumes)
- * @corr_peb_count: count of PEBs in the @corr list
- * @empty_peb_count: count of PEBs which are presumably empty (contain only
- * 0xFF bytes)
- * @alien_peb_count: count of PEBs in the @alien list
- * @bad_peb_count: count of bad physical eraseblocks
- * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
- * as bad yet, but which look like bad
- * @vols_found: number of volumes found during scanning
- * @highest_vol_id: highest volume ID
- * @is_empty: flag indicating whether the MTD device is empty or not
- * @min_ec: lowest erase counter value
- * @max_ec: highest erase counter value
- * @max_sqnum: highest sequence number value
- * @mean_ec: mean erase counter value
- * @ec_sum: a temporary variable used when calculating @mean_ec
- * @ec_count: a temporary variable used when calculating @mean_ec
- * @scan_leb_slab: slab cache for &struct ubi_scan_leb objects
- *
- * This data structure contains the result of scanning and may be used by other
- * UBI sub-systems to build final UBI data structures, further error-recovery
- * and so on.
- */
-struct ubi_scan_info {
- struct rb_root volumes;
- struct list_head corr;
- struct list_head free;
- struct list_head erase;
- struct list_head alien;
- int corr_peb_count;
- int empty_peb_count;
- int alien_peb_count;
- int bad_peb_count;
- int maybe_bad_peb_count;
- int vols_found;
- int highest_vol_id;
- int is_empty;
- int min_ec;
- int max_ec;
- unsigned long long max_sqnum;
- int mean_ec;
- uint64_t ec_sum;
- int ec_count;
- struct kmem_cache *scan_leb_slab;
-};
-
-struct ubi_device;
-struct ubi_vid_hdr;
-
-/*
- * ubi_scan_move_to_list - move a PEB from the volume tree to a list.
- *
- * @sv: volume scanning information
- * @seb: scanning eraseblock information
- * @list: the list to move to
- */
-static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
- struct ubi_scan_leb *seb,
- struct list_head *list)
-{
- rb_erase(&seb->u.rb, &sv->root);
- list_add_tail(&seb->u.list, list);
-}
-
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
- int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
- int bitflips);
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
- int vol_id);
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
- int lnum);
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv);
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
- struct ubi_scan_info *si);
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
- int pnum, int ec);
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi);
-void ubi_scan_destroy_si(struct ubi_scan_info *si);
-
-#endif /* !__UBI_SCAN_H__ */
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 6fb8ec2174a5..ac2b24d1783d 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -149,10 +149,10 @@ enum {
* The @image_seq field is used to validate a UBI image that has been prepared
* for a UBI device. The @image_seq value can be any value, but it must be the
* same on all eraseblocks. UBI will ensure that all new erase counter headers
- * also contain this value, and will check the value when scanning at start-up.
+ * also contain this value, and will check the value when attaching the flash.
* One way to make use of @image_seq is to increase its value by one every time
* an image is flashed over an existing image, then, if the flashing does not
- * complete, UBI will detect the error when scanning.
+ * complete, UBI will detect the error when attaching the media.
*/
struct ubi_ec_hdr {
__be32 magic;
@@ -298,8 +298,8 @@ struct ubi_vid_hdr {
#define UBI_INT_VOL_COUNT 1
/*
- * Starting ID of internal volumes. There is reserved room for 4096 internal
- * volumes.
+ * Starting ID of internal volumes: 0x7fffefff.
+ * There is reserved room for 4096 internal volumes.
*/
#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
@@ -375,4 +375,141 @@ struct ubi_vtbl_record {
__be32 crc;
} __packed;
+/* UBI fastmap on-flash data structures */
+
+#define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1)
+#define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2)
+
+/* fastmap on-flash data structure format version */
+#define UBI_FM_FMT_VERSION 1
+
+#define UBI_FM_SB_MAGIC 0x7B11D69F
+#define UBI_FM_HDR_MAGIC 0xD4B82EF7
+#define UBI_FM_VHDR_MAGIC 0xFA370ED1
+#define UBI_FM_POOL_MAGIC 0x67AF4D08
+#define UBI_FM_EBA_MAGIC 0xf0c040a8
+
+/* A fastmap super block can be located between PEB 0 and
+ * UBI_FM_MAX_START */
+#define UBI_FM_MAX_START 64
+
+/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
+#define UBI_FM_MAX_BLOCKS 32
+
+/* 5% of the total number of PEBs have to be scanned while attaching
+ * from a fastmap.
+ * But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and
+ * UBI_FM_MAX_POOL_SIZE */
+#define UBI_FM_MIN_POOL_SIZE 8
+#define UBI_FM_MAX_POOL_SIZE 256
+
+#define UBI_FM_WL_POOL_SIZE 25
+
+/**
+ * struct ubi_fm_sb - UBI fastmap super block
+ * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
+ * @version: format version of this fastmap
+ * @data_crc: CRC over the fastmap data
+ * @used_blocks: number of PEBs used by this fastmap
+ * @block_loc: an array containing the location of all PEBs of the fastmap
+ * @block_ec: the erase counter of each used PEB
+ * @sqnum: highest sequence number value at the time the fastmap was taken
+ *
+ */
+struct ubi_fm_sb {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be32 data_crc;
+ __be32 used_blocks;
+ __be32 block_loc[UBI_FM_MAX_BLOCKS];
+ __be32 block_ec[UBI_FM_MAX_BLOCKS];
+ __be64 sqnum;
+ __u8 padding2[32];
+} __packed;
+
+/**
+ * struct ubi_fm_hdr - header of the fastmap data set
+ * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
+ * @free_peb_count: number of free PEBs known by this fastmap
+ * @used_peb_count: number of used PEBs known by this fastmap
+ * @scrub_peb_count: number of to be scrubbed PEBs known by this fastmap
+ * @bad_peb_count: number of bad PEBs known by this fastmap
+ * @erase_peb_count: number of bad PEBs which have to be erased
+ * @vol_count: number of UBI volumes known by this fastmap
+ */
+struct ubi_fm_hdr {
+ __be32 magic;
+ __be32 free_peb_count;
+ __be32 used_peb_count;
+ __be32 scrub_peb_count;
+ __be32 bad_peb_count;
+ __be32 erase_peb_count;
+ __be32 vol_count;
+ __u8 padding[4];
+} __packed;
+
+/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
+
+/**
+ * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
+ * @magic: pool magic number (%UBI_FM_POOL_MAGIC)
+ * @size: current pool size
+ * @max_size: maximal pool size
+ * @pebs: an array containing the location of all PEBs in this pool
+ */
+struct ubi_fm_scan_pool {
+ __be32 magic;
+ __be16 size;
+ __be16 max_size;
+ __be32 pebs[UBI_FM_MAX_POOL_SIZE];
+ __be32 padding[4];
+} __packed;
+
+/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
+
+/**
+ * struct ubi_fm_ec - stores the erase counter of a PEB
+ * @pnum: PEB number
+ * @ec: ec of this PEB
+ */
+struct ubi_fm_ec {
+ __be32 pnum;
+ __be32 ec;
+} __packed;
+
+/**
+ * struct ubi_fm_volhdr - Fastmap volume header
+ * it identifies the start of an eba table
+ * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
+ * @vol_id: volume id of the fastmapped volume
+ * @vol_type: type of the fastmapped volume
+ * @data_pad: data_pad value of the fastmapped volume
+ * @used_ebs: number of used LEBs within this volume
+ * @last_eb_bytes: number of bytes used in the last LEB
+ */
+struct ubi_fm_volhdr {
+ __be32 magic;
+ __be32 vol_id;
+ __u8 vol_type;
+ __u8 padding1[3];
+ __be32 data_pad;
+ __be32 used_ebs;
+ __be32 last_eb_bytes;
+ __u8 padding2[8];
+} __packed;
+
+/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
+
+/**
+ * struct ubi_fm_eba - denotes an association between a PEB and LEB
+ * @magic: EBA table magic number
+ * @reserved_pebs: number of table entries
+ * @pnum: PEB number of LEB (LEB is the index)
+ */
+struct ubi_fm_eba {
+ __be32 magic;
+ __be32 reserved_pebs;
+ __be32 pnum[0];
+} __packed;
#endif /* !__UBI_MEDIA_H__ */
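Reading the layout comments above together, the fastmap data set consists of one ubi_fm_hdr, two ubi_fm_scan_pool records, the free and used ubi_fm_ec records, and then one ubi_fm_volhdr followed by its ubi_fm_eba table per volume, with the ubi_fm_sb recording where those blocks live. A hedged sketch of an upper bound on that data-set size, derived only from the structure definitions above (the real ubi_calc_fm_size() declared later in ubi.h may compute this differently):

	static size_t fm_size_upper_bound(const struct ubi_device *ubi)
	{
		/* One EC record may exist per PEB; pessimistically assume
		 * every volume table slot maps every PEB in its EBA table. */
		return sizeof(struct ubi_fm_hdr) +
		       2 * sizeof(struct ubi_fm_scan_pool) +
		       ubi->peb_count * sizeof(struct ubi_fm_ec) +
		       ubi->vtbl_slots * (sizeof(struct ubi_fm_volhdr) +
					  sizeof(struct ubi_fm_eba) +
					  ubi->peb_count * sizeof(__be32));
	}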
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index d51d75d34446..8ea6297a208f 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -43,7 +43,6 @@
#include <asm/pgtable.h>
#include "ubi-media.h"
-#include "scan.h"
/* Maximum number of supported UBI devices */
#define UBI_MAX_DEVICES 32
@@ -52,21 +51,21 @@
#define UBI_NAME_STR "ubi"
/* Normal UBI messages */
-#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
+#define ubi_msg(fmt, ...) pr_notice("UBI: " fmt "\n", ##__VA_ARGS__)
/* UBI warning messages */
-#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
- __func__, ##__VA_ARGS__)
+#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n", \
+ __func__, ##__VA_ARGS__)
/* UBI error messages */
-#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
+#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n", \
__func__, ##__VA_ARGS__)
-/* Lowest number PEBs reserved for bad PEB handling */
-#define MIN_RESEVED_PEBS 2
-
/* Background thread name pattern */
#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
-/* This marker in the EBA table means that the LEB is um-mapped */
+/*
+ * This marker in the EBA table means that the LEB is un-mapped.
+ * NOTE! It has to have the same value as %UBI_ALL.
+ */
#define UBI_LEB_UNMAPPED -1
/*
@@ -82,6 +81,16 @@
*/
#define UBI_PROT_QUEUE_LEN 10
+/* The volume ID/LEB number/erase counter is unknown */
+#define UBI_UNKNOWN -1
+
+/*
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number plus 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+
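A tiny illustration of how the two macros above are meant to pair up (hypothetical snippet, not part of the patch; ubi_num stands for the device number):

	char name[UBI_DFS_DIR_LEN + 1];

	snprintf(name, sizeof(name), UBI_DFS_DIR_NAME, ubi_num);
	/* e.g. ubi_num == 0 yields "ubi0": at most "ubi" + 2 digits + NUL */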
/*
* Error codes returned by the I/O sub-system.
*
@@ -118,7 +127,7 @@ enum {
* PEB
* MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
* PEB
- * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
+ * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the
* target PEB
* MOVE_RETRY: retry scrubbing the PEB
*/
@@ -127,10 +136,21 @@ enum {
MOVE_SOURCE_RD_ERR,
MOVE_TARGET_RD_ERR,
MOVE_TARGET_WR_ERR,
- MOVE_CANCEL_BITFLIPS,
+ MOVE_TARGET_BITFLIPS,
MOVE_RETRY,
};
+/*
+ * Return codes of the fastmap sub-system
+ *
+ * UBI_NO_FASTMAP: No fastmap super block was found
+ * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
+ */
+enum {
+ UBI_NO_FASTMAP = 1,
+ UBI_BAD_FASTMAP,
+};
+
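A hedged sketch of how a caller could distinguish these codes (the real fallback logic lives in attach.c, which is not part of this hunk; scan_all() is a hypothetical helper name):

	static int attach_sketch(struct ubi_device *ubi, struct ubi_attach_info *ai,
				 int fm_anchor)
	{
		int ret = ubi_scan_fastmap(ubi, ai, fm_anchor);

		if (ret == UBI_NO_FASTMAP || ret == UBI_BAD_FASTMAP)
			return scan_all(ubi, ai);	/* fall back to a full scan */
		return ret;				/* 0 on success, -errno on hard errors */
	}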
/**
* struct ubi_wl_entry - wear-leveling entry.
* @u.rb: link in the corresponding (free/used) RB-tree
@@ -197,6 +217,41 @@ struct ubi_rename_entry {
struct ubi_volume_desc;
/**
+ * struct ubi_fastmap_layout - in-memory fastmap data structure.
+ * @e: PEBs used by the current fastmap
+ * @to_be_tortured: if non-zero, this PEB has to be tortured
+ * @used_blocks: number of used PEBs
+ * @max_pool_size: maximal size of the user pool
+ * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
+ */
+struct ubi_fastmap_layout {
+ struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
+ int to_be_tortured[UBI_FM_MAX_BLOCKS];
+ int used_blocks;
+ int max_pool_size;
+ int max_wl_pool_size;
+};
+
+/**
+ * struct ubi_fm_pool - in-memory fastmap pool
+ * @pebs: PEBs in this pool
+ * @used: number of used PEBs
+ * @size: total number of PEBs in this pool
+ * @max_size: maximal size of the pool
+ *
+ * A pool gets filled with up to @max_size PEBs.
+ * If all PEBs within the pool are used, a new fastmap will be written
+ * to the flash and the pool gets refilled with empty PEBs.
+ *
+ */
+struct ubi_fm_pool {
+ int pebs[UBI_FM_MAX_POOL_SIZE];
+ int used;
+ int size;
+ int max_size;
+};
+
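The refill behaviour described in the comment can be pictured with a short consumer sketch (illustrative only; the actual pool handling lives in wl.c):

	static int pool_take_peb(struct ubi_device *ubi, struct ubi_fm_pool *pool)
	{
		if (pool->used == pool->size) {
			/* Pool exhausted: writing a new fastmap refills the pools. */
			int err = ubi_update_fastmap(ubi);

			if (err)
				return err;
		}
		return pool->pebs[pool->used++];
	}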
+/**
* struct ubi_volume - UBI volume description data structure.
* @dev: device object to make use of the Linux device model
* @cdev: character device object to create character device
@@ -222,8 +277,6 @@ struct ubi_volume_desc;
* @upd_ebs: how many eraseblocks are expected to be updated
* @ch_lnum: LEB number which is being changing by the atomic LEB change
* operation
- * @ch_dtype: data persistency type which is being changing by the atomic LEB
- * change operation
* @upd_bytes: how many bytes are expected to be received for volume update or
* atomic LEB change
* @upd_received: how many bytes were already received for volume update or
@@ -270,7 +323,6 @@ struct ubi_volume {
int upd_ebs;
int ch_lnum;
- int ch_dtype;
long long upd_bytes;
long long upd_received;
void *upd_buf;
@@ -297,6 +349,37 @@ struct ubi_volume_desc {
struct ubi_wl_entry;
/**
+ * struct ubi_debug_info - debugging information for an UBI device.
+ *
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ */
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
+};
+
+/**
* struct ubi_device - UBI device description structure
* @dev: UBI device object to use the Linux device model
* @cdev: character device object to create character device
@@ -334,9 +417,21 @@ struct ubi_wl_entry;
* @ltree: the lock tree
* @alc_mutex: serializes "atomic LEB change" operations
*
+ * @fm_disabled: non-zero if fastmap is disabled (default)
+ * @fm: in-memory data structure of the currently used fastmap
+ * @fm_pool: in-memory data structure of the fastmap pool
+ * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
+ * sub-system
+ * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
+ * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
+ * @fm_size: fastmap size in bytes
+ * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
+ * @fm_work: fastmap work queue
+ *
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
* @free: RB-tree of free physical eraseblocks
+ * @free_count: Contains the number of elements in @free
* @scrub: RB-tree of physical eraseblocks which need scrubbing
* @pq: protection queue (contain physical eraseblocks which are temporarily
* protected from the wear-leveling worker)
@@ -361,6 +456,7 @@ struct ubi_wl_entry;
* @flash_size: underlying MTD device size (in bytes)
* @peb_count: count of physical eraseblocks on the MTD device
* @peb_size: physical eraseblock size
+ * @bad_peb_limit: top limit of expected bad physical eraseblocks
* @bad_peb_count: count of bad physical eraseblocks
* @good_peb_count: count of good physical eraseblocks
* @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
@@ -387,9 +483,8 @@ struct ubi_wl_entry;
* time (MTD write buffer size)
* @mtd: MTD device descriptor
*
- * @peb_buf1: a buffer of PEB size used for different purposes
- * @peb_buf2: another buffer of PEB size used for different purposes
- * @buf_mutex: protects @peb_buf1 and @peb_buf2
+ * @peb_buf: a buffer of PEB size used for different purposes
+ * @buf_mutex: protects @peb_buf
* @ckvol_mutex: serializes static volume checking when opening
*
* @dbg: debugging information for this UBI device
@@ -409,6 +504,7 @@ struct ubi_device {
int avail_pebs;
int beb_rsvd_pebs;
int beb_rsvd_level;
+ int bad_peb_limit;
int autoresize_vol_id;
int vtbl_slots;
@@ -426,10 +522,22 @@ struct ubi_device {
struct rb_root ltree;
struct mutex alc_mutex;
+ /* Fastmap stuff */
+ int fm_disabled;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_fm_pool fm_pool;
+ struct ubi_fm_pool fm_wl_pool;
+ struct rw_semaphore fm_sem;
+ struct mutex fm_mutex;
+ void *fm_buf;
+ size_t fm_size;
+ struct work_struct fm_work;
+
/* Wear-leveling sub-system's stuff */
struct rb_root used;
struct rb_root erroneous;
struct rb_root free;
+ int free_count;
struct rb_root scrub;
struct list_head pq[UBI_PROT_QUEUE_LEN];
int pq_head;
@@ -471,12 +579,155 @@ struct ubi_device {
int max_write_size;
struct mtd_info *mtd;
- void *peb_buf1;
- void *peb_buf2;
+ void *peb_buf;
struct mutex buf_mutex;
struct mutex ckvol_mutex;
- struct ubi_debug_info *dbg;
+ struct ubi_debug_info dbg;
+};
+
+/**
+ * struct ubi_ainf_peb - attach information about a physical eraseblock.
+ * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
+ * @pnum: physical eraseblock number
+ * @vol_id: ID of the volume this LEB belongs to
+ * @lnum: logical eraseblock number
+ * @scrub: if this physical eraseblock needs scrubbing
+ * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
+ * @sqnum: sequence number
+ * @u: union of RB-tree and @list links
+ * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
+ * @u.list: link in one of the eraseblock lists
+ *
+ * One object of this type is allocated for each physical eraseblock when
+ * attaching an MTD device. Note, if this PEB does not belong to any LEB /
+ * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
+ */
+struct ubi_ainf_peb {
+ int ec;
+ int pnum;
+ int vol_id;
+ int lnum;
+ unsigned int scrub:1;
+ unsigned int copy_flag:1;
+ unsigned long long sqnum;
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+};
+
+/**
+ * struct ubi_ainf_volume - attaching information about a volume.
+ * @vol_id: volume ID
+ * @highest_lnum: highest logical eraseblock number in this volume
+ * @leb_count: number of logical eraseblocks in this volume
+ * @vol_type: volume type
+ * @used_ebs: number of used logical eraseblocks in this volume (only for
+ * static volumes)
+ * @last_data_size: amount of data in the last logical eraseblock of this
+ * volume (always equivalent to the usable logical eraseblock
+ * size in case of dynamic volumes)
+ * @data_pad: how many bytes at the end of logical eraseblocks of this volume
+ * are not used (due to volume alignment)
+ * @compat: compatibility flags of this volume
+ * @rb: link in the volume RB-tree
+ * @root: root of the RB-tree containing all the eraseblocks belonging to this
+ * volume (&struct ubi_ainf_peb objects)
+ *
+ * One object of this type is allocated for each volume when attaching an MTD
+ * device.
+ */
+struct ubi_ainf_volume {
+ int vol_id;
+ int highest_lnum;
+ int leb_count;
+ int vol_type;
+ int used_ebs;
+ int last_data_size;
+ int data_pad;
+ int compat;
+ struct rb_node rb;
+ struct rb_root root;
+};
+
+/**
+ * struct ubi_attach_info - MTD device attaching information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ * those belonging to "preserve"-compatible internal volumes)
+ * @corr_peb_count: count of PEBs in the @corr list
+ * @empty_peb_count: count of PEBs which are presumably empty (contain only
+ * 0xFF bytes)
+ * @alien_peb_count: count of PEBs in the @alien list
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
+ * as bad yet, but which look like bad
+ * @vols_found: number of volumes found
+ * @highest_vol_id: highest volume ID
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ *
+ * This data structure contains the result of attaching an MTD device and may
+ * be used by other UBI sub-systems to build final UBI data structures, further
+ * error-recovery and so on.
+ */
+struct ubi_attach_info {
+ struct rb_root volumes;
+ struct list_head corr;
+ struct list_head free;
+ struct list_head erase;
+ struct list_head alien;
+ int corr_peb_count;
+ int empty_peb_count;
+ int alien_peb_count;
+ int bad_peb_count;
+ int maybe_bad_peb_count;
+ int vols_found;
+ int highest_vol_id;
+ int is_empty;
+ int min_ec;
+ int max_ec;
+ unsigned long long max_sqnum;
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
+ struct kmem_cache *aeb_slab_cache;
+};
+
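A hedged sketch of walking the attach-time trees with the ubi_rb_for_each_entry() helper defined further down in this header (illustrative only, not taken from attach.c):

	static void dump_attach_info(struct ubi_attach_info *ai)
	{
		struct ubi_ainf_volume *av;
		struct ubi_ainf_peb *aeb;
		struct rb_node *rb1, *rb2;

		ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
			ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
				pr_info("vol %d: LEB %d -> PEB %d (ec %d)\n",
					av->vol_id, aeb->lnum, aeb->pnum, aeb->ec);
	}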
+/**
+ * struct ubi_work - UBI work description data structure.
+ * @list: a link in the list of pending works
+ * @func: worker function
+ * @e: physical eraseblock to erase
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
+ * @torture: if the physical eraseblock has to be tortured
+ * @anchor: produce an anchor PEB to be used by fastmap
+ *
+ * The @func pointer points to the worker function. If the @cancel argument is
+ * not zero, the worker has to free the resources and exit immediately. The
+ * worker has to return zero in case of success and a negative error code in
+ * case of failure.
+ */
+struct ubi_work {
+ struct list_head list;
+ int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
+ /* The below fields are only relevant to erasure works */
+ struct ubi_wl_entry *e;
+ int vol_id;
+ int lnum;
+ int torture;
+ int anchor;
};
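A minimal worker following the contract in the comment above (a sketch, not the real erase worker from wl.c):

	static int example_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				  int cancel)
	{
		if (cancel) {
			/* Shutting down: free resources and return immediately. */
			kfree(wrk);
			return 0;
		}

		/* Do the actual work here, e.g. erase wrk->e->pnum and honour
		 * wrk->torture, then release the work item. */
		kfree(wrk);
		return 0;
	}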
#include "debug.h"
@@ -489,12 +740,23 @@ extern struct class *ubi_class;
extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;
+/* attach.c */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id);
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai);
+int ubi_attach(struct ubi_device *ubi, int force_scan);
+void ubi_destroy_ai(struct ubi_attach_info *ai);
+
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
struct ubi_vtbl_record *vtbl_rec);
int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
struct list_head *rename_list);
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);
/* vmt.c */
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
@@ -518,6 +780,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
int length);
int ubi_check_volume(struct ubi_device *ubi, int vol_id);
+void ubi_update_reserved(struct ubi_device *ubi);
void ubi_calculate_reserved(struct ubi_device *ubi);
int ubi_check_pattern(const void *buf, uint8_t patt, int size);
@@ -527,24 +790,33 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int offset, int len, int check);
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
- const void *buf, int offset, int len, int dtype);
+ const void *buf, int offset, int len);
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype,
- int used_ebs);
+ int lnum, const void *buf, int len, int used_ebs);
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype);
+ int lnum, const void *buf, int len);
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr);
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan);
/* wl.c */
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture);
-int ubi_wl_flush(struct ubi_device *ubi);
+int ubi_wl_get_peb(struct ubi_device *ubi);
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture);
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
void ubi_wl_close(struct ubi_device *ubi);
int ubi_thread(void *u);
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
+ int lnum, int torture);
+int ubi_is_erase_work(struct ubi_work *wrk);
+void ubi_refill_pools(struct ubi_device *ubi);
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -564,7 +836,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
/* build.c */
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
@@ -575,11 +848,21 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_notify_all(struct ubi_device *ubi, int ntype,
struct notifier_block *nb);
int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_internal_volumes(struct ubi_device *ubi);
/* kapi.c */
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
+/* scan.c */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr);
+
+/* fastmap.c */
+size_t ubi_calc_fm_size(struct ubi_device *ubi);
+int ubi_update_fastmap(struct ubi_device *ubi);
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor);
/*
* ubi_rb_for_each_entry - walk an RB-tree.
@@ -595,6 +878,21 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
rb = rb_next(rb), \
pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
+/*
+ * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
+ *
+ * @av: volume attaching information
+ * @aeb: attaching eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
+ struct ubi_ainf_peb *aeb,
+ struct list_head *list)
+{
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, list);
+}
+
/**
* ubi_zalloc_vid_hdr - allocate a volume identifier header object.
* @ubi: UBI device description object
@@ -669,7 +967,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
if (!ubi->ro_mode) {
ubi->ro_mode = 1;
ubi_warn("switch to read-only mode");
- ubi_dbg_dump_stack();
+ dump_stack();
}
}
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 425bf5a3edd4..ec2c2dc1c1ca 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -64,8 +64,7 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
return 0;
}
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
vtbl_rec.upd_marker = 1;
mutex_lock(&ubi->device_mutex);
@@ -93,8 +92,7 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_gen("clear update marker for volume %d", vol->vol_id);
- memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
- sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol->vol_id];
ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
vtbl_rec.upd_marker = 0;
@@ -147,7 +145,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
}
if (bytes == 0) {
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
if (err)
return err;
@@ -186,14 +184,12 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_gen("start changing LEB %d:%d, %u bytes",
vol->vol_id, req->lnum, req->bytes);
if (req->bytes == 0)
- return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
- req->dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
vol->upd_bytes = req->bytes;
vol->upd_received = 0;
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
- vol->ch_dtype = req->dtype;
vol->upd_buf = vmalloc(req->bytes);
if (!vol->upd_buf)
@@ -246,8 +242,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
return 0;
}
- err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len,
- UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
} else {
/*
* When writing static volume, and this is the last logical
@@ -259,8 +254,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
* contain zeros, not random trash.
*/
memset(buf + len, 0, vol->usable_leb_size - len);
- err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
- UBI_UNKNOWN, used_ebs);
+ err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
}
return err;
@@ -365,7 +359,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
ubi_assert(vol->upd_received <= vol->upd_bytes);
if (vol->upd_received == vol->upd_bytes) {
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
if (err)
return err;
/* The update is finished, clear the update marker */
@@ -421,7 +415,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
len - vol->upd_bytes);
len = ubi_calc_data_len(ubi, vol->upd_buf, len);
err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
- vol->upd_buf, len, UBI_UNKNOWN);
+ vol->upd_buf, len);
if (err)
return err;
}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 97e093d19672..e4c677a4c969 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -28,11 +28,7 @@
#include <linux/slab.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_volumes(struct ubi_device *ubi);
-#else
-#define paranoid_check_volumes(ubi) 0
-#endif
+static int self_check_volumes(struct ubi_device *ubi);
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
@@ -226,7 +222,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
}
if (vol_id == UBI_VOL_NUM_AUTO) {
- dbg_err("out of volume IDs");
+ ubi_err("out of volume IDs");
err = -ENFILE;
goto out_unlock;
}
@@ -240,7 +236,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Ensure that this volume does not exist */
err = -EEXIST;
if (ubi->volumes[vol_id]) {
- dbg_err("volume %d already exists", vol_id);
+ ubi_err("volume %d already exists", vol_id);
goto out_unlock;
}
@@ -249,7 +245,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (ubi->volumes[i] &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
- dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
+ ubi_err("volume \"%s\" exists (ID %d)", req->name, i);
goto out_unlock;
}
@@ -260,9 +256,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
- dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
+ ubi_err("not enough PEBs, only %d available", ubi->avail_pebs);
if (ubi->corr_peb_count)
- dbg_err("%d PEBs are corrupted and not used",
+ ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
err = -ENOSPC;
goto out_unlock;
@@ -283,7 +279,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
* Finish all pending erases because there may be some LEBs belonging
* to the same volume ID.
*/
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
if (err)
goto out_acc;
@@ -359,8 +355,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while creating volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_sysfs:
@@ -447,21 +442,13 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
ubi->avail_pebs += reserved_pebs;
- i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (i > 0) {
- i = ubi->avail_pebs >= i ? i : ubi->avail_pebs;
- ubi->avail_pebs -= i;
- ubi->rsvd_pebs += i;
- ubi->beb_rsvd_pebs += i;
- if (i > 0)
- ubi_msg("reserve more %d PEBs", i);
- }
+ ubi_update_reserved(ubi);
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
- if (!no_vtbl && paranoid_check_volumes(ubi))
- dbg_err("check failed while removing volume %d", vol_id);
+ if (!no_vtbl)
+ self_check_volumes(ubi);
return err;
@@ -499,7 +486,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
- dbg_err("too small size %d, %d LEBs contain data",
+ ubi_err("too small size %d, %d LEBs contain data",
reserved_pebs, vol->used_ebs);
return -EINVAL;
}
@@ -528,10 +515,10 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (pebs > 0) {
spin_lock(&ubi->volumes_lock);
if (pebs > ubi->avail_pebs) {
- dbg_err("not enough PEBs: requested %d, available %d",
+ ubi_err("not enough PEBs: requested %d, available %d",
pebs, ubi->avail_pebs);
if (ubi->corr_peb_count)
- dbg_err("%d PEBs are corrupted and not used",
+ ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
spin_unlock(&ubi->volumes_lock);
err = -ENOSPC;
@@ -547,7 +534,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
/* Change volume table record */
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+ vtbl_rec = ubi->vtbl[vol_id];
vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
@@ -562,15 +549,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
ubi->avail_pebs -= pebs;
- pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (pebs > 0) {
- pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs;
- ubi->avail_pebs -= pebs;
- ubi->rsvd_pebs += pebs;
- ubi->beb_rsvd_pebs += pebs;
- if (pebs > 0)
- ubi_msg("reserve more %d PEBs", pebs);
- }
+ ubi_update_reserved(ubi);
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = vol->eba_tbl[i];
kfree(vol->eba_tbl);
@@ -587,8 +566,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while re-sizing volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_acc:
@@ -637,8 +615,8 @@ int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
}
}
- if (!err && paranoid_check_volumes(ubi))
- ;
+ if (!err)
+ self_check_volumes(ubi);
return err;
}
@@ -685,8 +663,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
return err;
}
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while adding volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_cdev:
@@ -711,16 +688,14 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
volume_sysfs_close(vol);
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_volume - check volume information.
+ * self_check_volume - check volume information.
* @ubi: UBI device description object
* @vol_id: volume ID
*
* Returns zero if the volume is all right and a negative error code if not.
*/
-static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
+static int self_check_volume(struct ubi_device *ubi, int vol_id)
{
int idx = vol_id2idx(ubi, vol_id);
int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
@@ -770,7 +745,7 @@ static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
}
if (vol->upd_marker && vol->corrupted) {
- dbg_err("update marker and corrupted simultaneously");
+ ubi_err("update marker and corrupted simultaneously");
goto fail;
}
@@ -852,34 +827,33 @@ static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
return 0;
fail:
- ubi_err("paranoid check failed for volume %d", vol_id);
+ ubi_err("self-check failed for volume %d", vol_id);
if (vol)
- ubi_dbg_dump_vol_info(vol);
- ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ ubi_dump_vol_info(vol);
+ ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
dump_stack();
spin_unlock(&ubi->volumes_lock);
return -EINVAL;
}
/**
- * paranoid_check_volumes - check information about all volumes.
+ * self_check_volumes - check information about all volumes.
* @ubi: UBI device description object
*
* Returns zero if the volumes are all right and a negative error code if not.
*/
-static int paranoid_check_volumes(struct ubi_device *ubi)
+static int self_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < ubi->vtbl_slots; i++) {
- err = paranoid_check_volume(ubi, i);
+ err = self_check_volume(ubi, i);
if (err)
break;
}
return err;
}
-#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 4b50a3029b84..30f84eff6385 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -37,16 +37,15 @@
* LEB 1. This scheme guarantees recoverability from unclean reboots.
*
* In this UBI implementation the on-flash volume table does not contain any
- * information about how many data static volumes contain. This information may
- * be found from the scanning data.
+ * information about how much data static volumes contain.
*
* But it would still be beneficial to store this information in the volume
* table. For example, suppose we have a static volume X, and all its physical
* eraseblocks became bad for some reasons. Suppose we are attaching the
- * corresponding MTD device, the scanning has found no logical eraseblocks
+ * corresponding MTD device, and for some reason we find no logical eraseblocks
* corresponding to the volume X. According to the volume table volume X does
* exist. So we don't know whether it is just empty or all its physical
- * eraseblocks went bad. So we cannot alarm the user about this corruption.
+ * eraseblocks went bad. So we cannot alarm the user properly.
*
* The volume table also stores so-called "update marker", which is used for
* volume updates. Before updating the volume, the update marker is set, and
@@ -62,11 +61,7 @@
#include <asm/div64.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static void paranoid_vtbl_check(const struct ubi_device *ubi);
-#else
-#define paranoid_vtbl_check(ubi)
-#endif
+static void self_vtbl_check(const struct ubi_device *ubi);
/* Empty volume table record */
static struct ubi_vtbl_record empty_vtbl_record;
@@ -106,12 +101,12 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
return err;
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
- ubi->vtbl_size, UBI_LONGTERM);
+ ubi->vtbl_size);
if (err)
return err;
}
- paranoid_vtbl_check(ubi);
+ self_vtbl_check(ubi);
return 0;
}
@@ -158,7 +153,7 @@ int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
return err;
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
- ubi->vtbl_size, UBI_LONGTERM);
+ ubi->vtbl_size);
if (err)
return err;
}
@@ -197,7 +192,7 @@ static int vtbl_check(const struct ubi_device *ubi,
if (be32_to_cpu(vtbl[i].crc) != crc) {
ubi_err("bad CRC at record %u: %#08x, not %#08x",
i, crc, be32_to_cpu(vtbl[i].crc));
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[i], i);
return 1;
}
@@ -229,7 +224,7 @@ static int vtbl_check(const struct ubi_device *ubi,
n = ubi->leb_size % alignment;
if (data_pad != n) {
- dbg_err("bad data_pad, has to be %d", n);
+ ubi_err("bad data_pad, has to be %d", n);
err = 6;
goto bad;
}
@@ -245,7 +240,7 @@ static int vtbl_check(const struct ubi_device *ubi,
}
if (reserved_pebs > ubi->good_peb_count) {
- dbg_err("too large reserved_pebs %d, good PEBs %d",
+ ubi_err("too large reserved_pebs %d, good PEBs %d",
reserved_pebs, ubi->good_peb_count);
err = 9;
goto bad;
@@ -275,10 +270,10 @@ static int vtbl_check(const struct ubi_device *ubi,
if (len1 > 0 && len1 == len2 &&
!strncmp(vtbl[i].name, vtbl[n].name, len1)) {
- ubi_err("volumes %d and %d have the same name"
- " \"%s\"", i, n, vtbl[i].name);
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
- ubi_dbg_dump_vtbl_record(&vtbl[n], n);
+ ubi_err("volumes %d and %d have the same name \"%s\"",
+ i, n, vtbl[i].name);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[n], n);
return -EINVAL;
}
}
@@ -288,65 +283,64 @@ static int vtbl_check(const struct ubi_device *ubi,
bad:
ubi_err("volume table check failed: record %d, error %d", i, err);
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[i], i);
return -EINVAL;
}
/**
* create_vtbl - create a copy of volume table.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @copy: number of the volume table copy
* @vtbl: contents of the volume table
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
+static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
int copy, void *vtbl)
{
int err, tries = 0;
- static struct ubi_vid_hdr *vid_hdr;
- struct ubi_scan_leb *new_seb;
+ struct ubi_vid_hdr *vid_hdr;
+ struct ubi_ainf_peb *new_aeb;
- ubi_msg("create volume table (copy #%d)", copy + 1);
+ dbg_gen("create volume table (copy #%d)", copy + 1);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vid_hdr)
return -ENOMEM;
retry:
- new_seb = ubi_scan_get_free_peb(ubi, si);
- if (IS_ERR(new_seb)) {
- err = PTR_ERR(new_seb);
+ new_aeb = ubi_early_get_peb(ubi, ai);
+ if (IS_ERR(new_aeb)) {
+ err = PTR_ERR(new_aeb);
goto out_free;
}
- vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
vid_hdr->data_size = vid_hdr->used_ebs =
vid_hdr->data_pad = cpu_to_be32(0);
vid_hdr->lnum = cpu_to_be32(copy);
- vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
+ vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
/* The EC header is already there, write the VID header */
- err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
+ err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
if (err)
goto write_error;
/* Write the layout volume contents */
- err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size);
+ err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
if (err)
goto write_error;
/*
- * And add it to the scanning information. Don't delete the old version
- * of this LEB as it will be deleted and freed in 'ubi_scan_add_used()'.
+ * And add it to the attaching information. Don't delete the old version
+ * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
*/
- err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
- vid_hdr, 0);
- kfree(new_seb);
+ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -356,10 +350,10 @@ write_error:
* Probably this physical eraseblock went bad, try to pick
* another one.
*/
- list_add(&new_seb->u.list, &si->erase);
+ list_add(&new_aeb->u.list, &ai->erase);
goto retry;
}
- kfree(new_seb);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -369,20 +363,20 @@ out_free:
/**
* process_lvol - process the layout volume.
* @ubi: UBI device description object
- * @si: scanning information
- * @sv: layout volume scanning information
+ * @ai: attaching information
+ * @av: layout volume attaching information
*
* This function is responsible for reading the layout volume, ensuring it is
* not corrupted, and recovering from corruptions if needed. Returns volume
* table in case of success and a negative error code in case of failure.
*/
static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
- struct ubi_scan_info *si,
- struct ubi_scan_volume *sv)
+ struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av)
{
int err;
struct rb_node *rb;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
@@ -414,14 +408,14 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
dbg_gen("check layout volume");
/* Read both LEB 0 and LEB 1 into memory */
- ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- leb[seb->lnum] = vzalloc(ubi->vtbl_size);
- if (!leb[seb->lnum]) {
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
+ if (!leb[aeb->lnum]) {
err = -ENOMEM;
goto out_free;
}
- err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
+ err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
ubi->vtbl_size);
if (err == UBI_IO_BITFLIPS || err == -EBADMSG)
/*
@@ -429,12 +423,12 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
* uncorrectable ECC error, but we have our own CRC and
* the data will be checked later. If the data is OK,
* the PEB will be scrubbed (because we set
- * seb->scrub). If the data is not OK, the contents of
+ * aeb->scrub). If the data is not OK, the contents of
* the PEB will be recovered from the second copy, and
- * seb->scrub will be cleared in
- * 'ubi_scan_add_used()'.
+ * aeb->scrub will be cleared in
+ * 'ubi_add_to_av()'.
*/
- seb->scrub = 1;
+ aeb->scrub = 1;
else if (err)
goto out_free;
}
@@ -453,7 +447,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
ubi->vtbl_size);
if (leb_corrupted[1]) {
ubi_warn("volume table copy #2 is corrupted");
- err = create_vtbl(ubi, si, 1, leb[0]);
+ err = create_vtbl(ubi, ai, 1, leb[0]);
if (err)
goto out_free;
ubi_msg("volume table was restored");
@@ -476,7 +470,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
}
ubi_warn("volume table copy #1 is corrupted");
- err = create_vtbl(ubi, si, 0, leb[1]);
+ err = create_vtbl(ubi, ai, 0, leb[1]);
if (err)
goto out_free;
ubi_msg("volume table was restored");
@@ -494,13 +488,13 @@ out_free:
/**
* create_empty_lvol - create empty layout volume.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns volume table contents in case of success and a
* negative error code in case of failure.
*/
static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+ struct ubi_attach_info *ai)
{
int i;
struct ubi_vtbl_record *vtbl;
@@ -515,7 +509,7 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
int err;
- err = create_vtbl(ubi, si, i, vtbl);
+ err = create_vtbl(ubi, ai, i, vtbl);
if (err) {
vfree(vtbl);
return ERR_PTR(err);
@@ -528,18 +522,19 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
/**
* init_volumes - initialize volume information for existing volumes.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @vtbl: volume table
*
* This function allocates volume description objects for existing volumes.
* Returns zero in case of success and a negative error code in case of
* failure.
*/
-static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
+static int init_volumes(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai,
const struct ubi_vtbl_record *vtbl)
{
int i, reserved_pebs = 0;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
for (i = 0; i < ubi->vtbl_slots; i++) {
@@ -567,8 +562,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
/* Auto re-size flag may be set only for one volume */
if (ubi->autoresize_vol_id != -1) {
- ubi_err("more than one auto-resize volume (%d "
- "and %d)", ubi->autoresize_vol_id, i);
+ ubi_err("more than one auto-resize volume (%d and %d)",
+ ubi->autoresize_vol_id, i);
kfree(vol);
return -EINVAL;
}
@@ -595,8 +590,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
/* Static volumes only */
- sv = ubi_scan_find_sv(si, i);
- if (!sv) {
+ av = ubi_find_av(ai, i);
+ if (!av) {
/*
* No eraseblocks belonging to this volume found. We
* don't actually know whether this static volume is
@@ -608,22 +603,22 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
continue;
}
- if (sv->leb_count != sv->used_ebs) {
+ if (av->leb_count != av->used_ebs) {
/*
* We found a static volume which misses several
* eraseblocks. Treat it as corrupted.
*/
ubi_warn("static volume %d misses %d LEBs - corrupted",
- sv->vol_id, sv->used_ebs - sv->leb_count);
+ av->vol_id, av->used_ebs - av->leb_count);
vol->corrupted = 1;
continue;
}
- vol->used_ebs = sv->used_ebs;
+ vol->used_ebs = av->used_ebs;
vol->used_bytes =
(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
- vol->used_bytes += sv->last_data_size;
- vol->last_eb_bytes = sv->last_data_size;
+ vol->used_bytes += av->last_data_size;
+ vol->last_eb_bytes = av->last_data_size;
}
/* And add the layout volume */
@@ -632,7 +627,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
return -ENOMEM;
vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
- vol->alignment = 1;
+ vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
vol->vol_type = UBI_DYNAMIC_VOLUME;
vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
@@ -664,105 +659,104 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
/**
- * check_sv - check volume scanning information.
+ * check_av - check volume attaching information.
* @vol: UBI volume description object
- * @sv: volume scanning information
+ * @av: volume attaching information
*
- * This function returns zero if the volume scanning information is consistent
+ * This function returns zero if the volume attaching information is consistent
* with the data read from the volume table, and %-EINVAL if not.
*/
-static int check_sv(const struct ubi_volume *vol,
- const struct ubi_scan_volume *sv)
+static int check_av(const struct ubi_volume *vol,
+ const struct ubi_ainf_volume *av)
{
int err;
- if (sv->highest_lnum >= vol->reserved_pebs) {
+ if (av->highest_lnum >= vol->reserved_pebs) {
err = 1;
goto bad;
}
- if (sv->leb_count > vol->reserved_pebs) {
+ if (av->leb_count > vol->reserved_pebs) {
err = 2;
goto bad;
}
- if (sv->vol_type != vol->vol_type) {
+ if (av->vol_type != vol->vol_type) {
err = 3;
goto bad;
}
- if (sv->used_ebs > vol->reserved_pebs) {
+ if (av->used_ebs > vol->reserved_pebs) {
err = 4;
goto bad;
}
- if (sv->data_pad != vol->data_pad) {
+ if (av->data_pad != vol->data_pad) {
err = 5;
goto bad;
}
return 0;
bad:
- ubi_err("bad scanning information, error %d", err);
- ubi_dbg_dump_sv(sv);
- ubi_dbg_dump_vol_info(vol);
+ ubi_err("bad attaching information, error %d", err);
+ ubi_dump_av(av);
+ ubi_dump_vol_info(vol);
return -EINVAL;
}
/**
- * check_scanning_info - check that scanning information.
+ * check_attaching_info - check the attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* Even though we protect on-flash data by CRC checksums, we still don't trust
- * the media. This function ensures that scanning information is consistent to
- * the information read from the volume table. Returns zero if the scanning
+ * the media. This function ensures that the attaching information is consistent with
+ * the information read from the volume table. Returns zero if the attaching
* information is OK and %-EINVAL if it is not.
*/
-static int check_scanning_info(const struct ubi_device *ubi,
- struct ubi_scan_info *si)
+static int check_attaching_info(const struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
{
int err, i;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
- if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
- ubi_err("scanning found %d volumes, maximum is %d + %d",
- si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
+ if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
+ ubi_err("found %d volumes while attaching, maximum is %d + %d",
+ ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
return -EINVAL;
}
- if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
- si->highest_vol_id < UBI_INTERNAL_VOL_START) {
- ubi_err("too large volume ID %d found by scanning",
- si->highest_vol_id);
+ if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
+ ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err("too large volume ID %d found", ai->highest_vol_id);
return -EINVAL;
}
for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
cond_resched();
- sv = ubi_scan_find_sv(si, i);
+ av = ubi_find_av(ai, i);
vol = ubi->volumes[i];
if (!vol) {
- if (sv)
- ubi_scan_rm_volume(si, sv);
+ if (av)
+ ubi_remove_av(ai, av);
continue;
}
if (vol->reserved_pebs == 0) {
ubi_assert(i < ubi->vtbl_slots);
- if (!sv)
+ if (!av)
continue;
/*
- * During scanning we found a volume which does not
+ * During attaching we found a volume which does not
* exist according to the information in the volume
* table. This must have happened due to an unclean
* reboot while the volume was being removed. Discard
* these eraseblocks.
*/
- ubi_msg("finish volume %d removal", sv->vol_id);
- ubi_scan_rm_volume(si, sv);
- } else if (sv) {
- err = check_sv(vol, sv);
+ ubi_msg("finish volume %d removal", av->vol_id);
+ ubi_remove_av(ai, av);
+ } else if (av) {
+ err = check_av(vol, av);
if (err)
return err;
}
@@ -774,16 +768,16 @@ static int check_scanning_info(const struct ubi_device *ubi,
/**
* ubi_read_volume_table - read the volume table.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function reads the volume table, checks it, recovers from errors if needed,
* or creates it if needed. Returns zero in case of success and a negative
* error code in case of failure.
*/
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, err;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
@@ -798,8 +792,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
- sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
- if (!sv) {
+ av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
+ if (!av) {
/*
* No logical eraseblocks belonging to the layout volume were
* found. This could mean that the flash is just empty. In
@@ -808,8 +802,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
* But if flash is not empty this must be a corruption or the
* MTD device just contains garbage.
*/
- if (si->is_empty) {
- ubi->vtbl = create_empty_lvol(ubi, si);
+ if (ai->is_empty) {
+ ubi->vtbl = create_empty_lvol(ubi, ai);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
} else {
@@ -817,14 +811,14 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
return -EINVAL;
}
} else {
- if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) {
+ if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
/* This must not happen with proper UBI images */
- dbg_err("too many LEBs (%d) in layout volume",
- sv->leb_count);
+ ubi_err("too many LEBs (%d) in layout volume",
+ av->leb_count);
return -EINVAL;
}
- ubi->vtbl = process_lvol(ubi, si, sv);
+ ubi->vtbl = process_lvol(ubi, ai, av);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
}
@@ -835,15 +829,15 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
* The layout volume is OK, initialize the corresponding in-RAM data
* structures.
*/
- err = init_volumes(ubi, si, ubi->vtbl);
+ err = init_volumes(ubi, ai, ubi->vtbl);
if (err)
goto out_free;
/*
- * Make sure that the scanning information is consistent to the
+ * Make sure that the attaching information is consistent to the
* information stored in the volume table.
*/
- err = check_scanning_info(ubi, si);
+ err = check_attaching_info(ubi, ai);
if (err)
goto out_free;
@@ -858,21 +852,17 @@ out_free:
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_vtbl_check - check volume table.
+ * self_vtbl_check - check volume table.
* @ubi: UBI device description object
*/
-static void paranoid_vtbl_check(const struct ubi_device *ubi)
+static void self_vtbl_check(const struct ubi_device *ubi)
{
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return;
if (vtbl_check(ubi, ubi->vtbl)) {
- ubi_err("paranoid check failed");
+ ubi_err("self-check failed");
BUG();
}
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 0696e36b0539..5df49d3cb5c7 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1,5 +1,4 @@
/*
- * @ubi: UBI device description object
* Copyright (c) International Business Machines Corp., 2006
*
* This program is free software; you can redistribute it and/or modify
@@ -41,12 +40,6 @@
* physical eraseblocks with low erase counter to free physical eraseblocks
* with high erase counter.
*
- * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
- * an "optimal" physical eraseblock. For example, when it is known that the
- * physical eraseblock will be "put" soon because it contains short-term data,
- * the WL sub-system may pick a free physical eraseblock with low erase
- * counter, and so forth.
- *
* If the WL sub-system fails to erase a physical eraseblock, it marks it as
* bad.
*
@@ -70,8 +63,7 @@
* to the user; instead, we first want to let users fill them up with data;
*
* o there is a chance that the user will put the physical eraseblock very
- * soon, so it makes sense not to move it for some time, but wait; this is
- * especially important in case of "short term" physical eraseblocks.
+ * soon, so it makes sense not to move it for some time, but wait.
*
* Physical eraseblocks stay protected only for limited time. But the "time" is
* measured in erase cycles in this case. This is implemented with help of the
@@ -142,37 +134,46 @@
*/
#define WL_MAX_FAILURES 32
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root);
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
/**
- * struct ubi_work - UBI work description data structure.
- * @list: a link in the list of pending works
- * @func: worker function
- * @e: physical eraseblock to erase
- * @torture: if the physical eraseblock has to be tortured
- *
- * The @func pointer points to the worker function. If the @cancel argument is
- * not zero, the worker has to free the resources and exit immediately. The
- * worker has to return zero in case of success and a negative error code in
- * case of failure.
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
*/
-struct ubi_work {
- struct list_head list;
- int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
- /* The below fields are only relevant to erasure works */
- struct ubi_wl_entry *e;
- int torture;
-};
-
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
-static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
- struct ubi_wl_entry *e,
- struct rb_root *root);
-static int paranoid_check_in_pq(const struct ubi_device *ubi,
- struct ubi_wl_entry *e);
+static void update_fastmap_work_fn(struct work_struct *wrk)
+{
+ struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+ ubi_update_fastmap(ubi);
+}
+
+/**
+ * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
+ * @ubi: UBI device description object
+ * @pnum: the PEB to be checked
+ */
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+ int i;
+
+ if (!ubi->fm)
+ return 0;
+
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ if (ubi->fm->e[i]->pnum == pnum)
+ return 1;
+
+ return 0;
+}
#else
-#define paranoid_check_ec(ubi, pnum, ec) 0
-#define paranoid_check_in_wl_tree(ubi, e, root)
-#define paranoid_check_in_pq(ubi, e) 0
+static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
+{
+ return 0;
+}
#endif
/**
@@ -271,18 +272,16 @@ static int produce_free_peb(struct ubi_device *ubi)
{
int err;
- spin_lock(&ubi->wl_lock);
while (!ubi->free.rb_node) {
spin_unlock(&ubi->wl_lock);
dbg_wl("do one work synchronously");
err = do_work(ubi);
- if (err)
- return err;
spin_lock(&ubi->wl_lock);
+ if (err)
+ return err;
}
- spin_unlock(&ubi->wl_lock);
return 0;
}
@@ -349,19 +348,22 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
/**
* find_wl_entry - find wear-leveling entry closest to certain erase counter.
+ * @ubi: UBI device description object
* @root: the RB-tree where to look for
- * @max: highest possible erase counter
+ * @diff: maximum possible difference from the smallest erase counter
*
* This function looks for a wear leveling entry with erase counter closest to
- * @max and less than @max.
+ * min + @diff, where min is the smallest erase counter.
*/
-static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
+static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root, int diff)
{
struct rb_node *p;
- struct ubi_wl_entry *e;
+ struct ubi_wl_entry *e, *prev_e = NULL;
+ int max;
e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
- max += e->ec;
+ max = e->ec + diff;
p = root->rb_node;
while (p) {
@@ -372,39 +374,143 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
p = p->rb_left;
else {
p = p->rb_right;
+ prev_e = e;
e = e1;
}
}
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best WL entry
+ * such that fastmap can use the anchor PEB later. */
+ if (prev_e && !ubi->fm_disabled &&
+ !ubi->fm && e->pnum < UBI_FM_MAX_START)
+ return prev_e;
+
return e;
}
/**
- * ubi_wl_get_peb - get a physical eraseblock.
+ * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
* @ubi: UBI device description object
- * @dtype: type of data which will be stored in this physical eraseblock
+ * @root: the RB-tree where to look for
*
- * This function returns a physical eraseblock in case of success and a
- * negative error code in case of failure. Might sleep.
+ * This function looks for a wear leveling entry with medium erase counter,
+ * but not greater than or equal to the lowest erase counter plus
+ * %WL_FREE_MAX_DIFF/2.
*/
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
+static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root)
{
- int err, medium_ec;
struct ubi_wl_entry *e, *first, *last;
- ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
- dtype == UBI_UNKNOWN);
+ first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
+
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
+ e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best
+ * WL entry such that fastmap can use the anchor PEB later. */
+ if (e && !ubi->fm_disabled && !ubi->fm &&
+ e->pnum < UBI_FM_MAX_START)
+ e = rb_entry(rb_next(root->rb_node),
+ struct ubi_wl_entry, u.rb);
+#endif
+ } else
+ e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+
+ return e;
+}
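A worked illustration of the selection rule above (illustrative only, the numbers are assumed): with free-tree erase counters {10, 20, 30} the spread stays below WL_FREE_MAX_DIFF, so the RB-tree root entry is returned; with {10, 20, 9000} and an assumed WL_FREE_MAX_DIFF of 8192 (twice UBI_WL_THRESHOLD), the spread is too large, so find_wl_entry(ubi, root, 4096) returns the entry with the largest erase counter still below 10 + 4096, here the one with EC 20.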
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * find_anchor_wl_entry - find wear-leveling entry to be used as the anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e, *victim = NULL;
+ int max_ec = UBI_MAX_ERASECOUNTER;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb) {
+ if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+ victim = e;
+ max_ec = e->ec;
+ }
+ }
+
+ return victim;
+}
+
+static int anchor_pebs_avalible(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb)
+ if (e->pnum < UBI_FM_MAX_START)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a physical erase block with a given maximal number
+ * and removes it from the wl subsystem.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+ struct ubi_wl_entry *e = NULL;
+
+ if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ goto out;
+
+ if (anchor)
+ e = find_anchor_wl_entry(&ubi->free);
+ else
+ e = find_mean_wl_entry(ubi, &ubi->free);
+
+ if (!e)
+ goto out;
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /* remove it from the free list,
+ * the WL sub-system no longer knows this erase block */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+out:
+ return e;
+}
+#endif
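A minimal usage sketch of ubi_wl_get_fm_peb() (illustrative only; the caller, assumed here to be the fastmap write-out path, is not part of this hunk):

	struct ubi_wl_entry *e;

	spin_lock(&ubi->wl_lock);
	e = ubi_wl_get_fm_peb(ubi, 1);	/* request an anchor PEB */
	spin_unlock(&ubi->wl_lock);
	if (!e)
		return -ENOSPC;	/* no suitable free PEB at the moment */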
+
+/**
+ * __wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ */
+static int __wl_get_peb(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_wl_entry *e;
retry:
- spin_lock(&ubi->wl_lock);
if (!ubi->free.rb_node) {
if (ubi->works_count == 0) {
- ubi_assert(list_empty(&ubi->works));
ubi_err("no free eraseblocks");
- spin_unlock(&ubi->wl_lock);
+ ubi_assert(list_empty(&ubi->works));
return -ENOSPC;
}
- spin_unlock(&ubi->wl_lock);
err = produce_free_peb(ubi);
if (err < 0)
@@ -412,66 +518,186 @@ retry:
goto retry;
}
- switch (dtype) {
- case UBI_LONGTERM:
- /*
- * For long term data we pick a physical eraseblock with high
- * erase counter. But the highest erase counter we can pick is
- * bounded by the the lowest erase counter plus
- * %WL_FREE_MAX_DIFF.
- */
- e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- break;
- case UBI_UNKNOWN:
- /*
- * For unknown data we pick a physical eraseblock with medium
- * erase counter. But we by no means can pick a physical
- * eraseblock with erase counter greater or equivalent than the
- * lowest erase counter plus %WL_FREE_MAX_DIFF.
- */
- first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
- u.rb);
- last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
-
- if (last->ec - first->ec < WL_FREE_MAX_DIFF)
- e = rb_entry(ubi->free.rb_node,
- struct ubi_wl_entry, u.rb);
- else {
- medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
- e = find_wl_entry(&ubi->free, medium_ec);
- }
- break;
- case UBI_SHORTTERM:
- /*
- * For short term data we pick a physical eraseblock with the
- * lowest erase counter as we expect it will be erased soon.
- */
- e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
- break;
- default:
- BUG();
+ e = find_mean_wl_entry(ubi, &ubi->free);
+ if (!e) {
+ ubi_err("no free eraseblocks");
+ return -ENOSPC;
}
- paranoid_check_in_wl_tree(ubi, e, &ubi->free);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
/*
* Move the physical eraseblock to the protection queue where it will
* be protected from being moved for some time.
*/
rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+#ifndef CONFIG_MTD_UBI_FASTMAP
+ /* We have to enqueue e only if fastmap is disabled;
+ * if fastmap is enabled, prot_queue_add() will be called by
+ * ubi_wl_get_peb() after removing e from the pool. */
prot_queue_add(ubi, e);
+#endif
+ return e->pnum;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * return_unused_pool_pebs - returns unused PEBs to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+ struct ubi_fm_pool *pool)
+{
+ int i;
+ struct ubi_wl_entry *e;
+
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+}
+
+/**
+ * refill_wl_pool - refills the fastmap pool used by the
+ * WL sub-system.
+ * @ubi: UBI device description object
+ */
+static void refill_wl_pool(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+
+ return_unused_pool_pebs(ubi, pool);
+
+ for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ break;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+
+ pool->pebs[pool->size] = e->pnum;
+ }
+ pool->used = 0;
+}
+
+/**
+ * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb().
+ * @ubi: UBI device description object
+ */
+static void refill_wl_user_pool(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+
+ return_unused_pool_pebs(ubi, pool);
+
+ for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ break;
+
+ pool->pebs[pool->size] = __wl_get_peb(ubi);
+ if (pool->pebs[pool->size] < 0)
+ break;
+ }
+ pool->used = 0;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+ spin_lock(&ubi->wl_lock);
+ refill_wl_pool(ubi);
+ refill_wl_user_pool(ubi);
+ spin_unlock(&ubi->wl_lock);
+}
+
+/* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
+ * the fastmap pool.
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int ret;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+ if (!pool->size || !wl_pool->size || pool->used == pool->size ||
+ wl_pool->used == wl_pool->size)
+ ubi_update_fastmap(ubi);
+
+ /* not a single free PEB is available */
+ if (!pool->size)
+ ret = -ENOSPC;
+ else {
+ spin_lock(&ubi->wl_lock);
+ ret = pool->pebs[pool->used++];
+ prot_queue_add(ubi, ubi->lookuptbl[ret]);
+ spin_unlock(&ubi->wl_lock);
+ }
+
+ return ret;
+}
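For illustration, a sketch of how a caller would consume the returned PEB (the caller is assumed, not part of the patch):

	int pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0)
		return pnum;	/* -ENOSPC when even a fastmap update found no free PEB */
	/* pnum now sits in the protection queue and can be written to */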
+
+/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ if (pool->used == pool->size || !pool->size) {
+ /* We cannot update the fastmap here because this
+ * function is called in atomic context.
+ * Let's fail here and refill/update it as soon as possible. */
+ schedule_work(&ubi->fm_work);
+ return NULL;
+ } else {
+ pnum = pool->pebs[pool->used++];
+ return ubi->lookuptbl[pnum];
+ }
+}
+#else
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+
+ return e;
+}
+
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int peb, err;
+
+ spin_lock(&ubi->wl_lock);
+ peb = __wl_get_peb(ubi);
spin_unlock(&ubi->wl_lock);
- err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
- ubi->peb_size - ubi->vid_hdr_aloffset);
+ err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
+ ubi->peb_size - ubi->vid_hdr_aloffset);
if (err) {
- ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
+ ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
return err;
}
- return e->pnum;
+ return peb;
}
+#endif
/**
* prot_queue_del - remove a physical eraseblock from the protection queue.
@@ -489,7 +715,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
if (!e)
return -ENODEV;
- if (paranoid_check_in_pq(ubi, e))
+ if (self_check_in_pq(ubi, e))
return -ENODEV;
list_del(&e->u.list);
@@ -515,7 +741,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
- err = paranoid_check_ec(ubi, e->pnum, e->ec);
+ err = self_check_ec(ubi, e->pnum, e->ec);
if (err)
return -EINVAL;
@@ -603,14 +829,14 @@ repeat:
}
/**
- * schedule_ubi_work - schedule a work.
+ * __schedule_ubi_work - schedule a work.
* @ubi: UBI device description object
* @wrk: the work to schedule
*
* This function adds a work defined by @wrk to the tail of the pending works
- * list.
+ * list. Can only be used if ubi->work_sem is already held in read mode!
*/
-static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
spin_lock(&ubi->wl_lock);
list_add_tail(&wrk->list, &ubi->works);
@@ -621,23 +847,54 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
spin_unlock(&ubi->wl_lock);
}
+/**
+ * schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
+ */
+static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ down_read(&ubi->work_sem);
+ __schedule_ubi_work(ubi, wrk);
+ up_read(&ubi->work_sem);
+}
+
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+ return wrk->func == erase_worker;
+}
+#endif
+
/**
* schedule_erase - schedule an erase work.
* @ubi: UBI device description object
* @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
*/
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
- int torture)
+ int vol_id, int lnum, int torture)
{
struct ubi_work *wl_wrk;
+ ubi_assert(e);
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
e->pnum, e->ec, torture);
@@ -647,6 +904,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
wl_wrk->func = &erase_worker;
wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
wl_wrk->torture = torture;
schedule_ubi_work(ubi, wl_wrk);
@@ -654,6 +913,79 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
}
/**
+ * do_sync_erase - run the erase worker synchronously.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ */
+static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int vol_id, int lnum, int torture)
+{
+ struct ubi_work *wl_wrk;
+
+ dbg_wl("sync erase of PEB %i", e->pnum);
+
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wl_wrk)
+ return -ENOMEM;
+
+ wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
+ wl_wrk->torture = torture;
+
+ return erase_worker(ubi, wl_wrk, 0);
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+ int lnum, int torture)
+{
+ struct ubi_wl_entry *e;
+ int vol_id, pnum = fm_e->pnum;
+
+ dbg_wl("PEB %d", pnum);
+
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+
+ /* This can happen if we recovered from a fastmap the very
+ * first time and are now writing a new one. In this case the WL
+ * sub-system has never seen any PEB used by the original fastmap.
+ */
+ if (!e) {
+ e = fm_e;
+ ubi_assert(e->ec >= 0);
+ ubi->lookuptbl[pnum] = e;
+ } else {
+ e->ec = fm_e->ec;
+ kfree(fm_e);
+ }
+
+ spin_unlock(&ubi->wl_lock);
+
+ vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+ return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+#endif
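A hedged usage sketch of ubi_wl_put_fm_peb() (the caller and arguments are assumed, e.g. fastmap code discarding a PEB of a previous fastmap):

	/* a non-zero lnum marks the PEB as fastmap data, zero as the superblock */
	err = ubi_wl_put_fm_peb(ubi, old_e, 1, 0);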
+
+/**
* wear_leveling_worker - wear-leveling worker function.
* @ubi: UBI device description object
* @wrk: the work object
@@ -668,6 +1000,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int vol_id = -1, uninitialized_var(lnum);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ int anchor = wrk->anchor;
+#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
@@ -701,21 +1036,42 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_cancel;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Check whether we need to produce an anchor PEB */
+ if (!anchor)
+ anchor = !anchor_pebs_avalible(&ubi->free);
+
+ if (anchor) {
+ e1 = find_anchor_wl_entry(&ubi->used);
+ if (!e1)
+ goto out_cancel;
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ } else if (!ubi->scrub.rb_node) {
+#else
if (!ubi->scrub.rb_node) {
+#endif
/*
* Now pick the least worn-out used physical eraseblock and a
* highly worn-out free physical eraseblock. If the erase
* counters differ much enough, start wear-leveling.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
e1->ec, e2->ec);
goto out_cancel;
}
- paranoid_check_in_wl_tree(ubi, e1, &ubi->used);
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
@@ -723,14 +1079,15 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
/* Perform scrubbing */
scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- paranoid_check_in_wl_tree(ubi, e1, &ubi->scrub);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->scrub);
rb_erase(&e1->u.rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
- paranoid_check_in_wl_tree(ubi, e2, &ubi->free);
- rb_erase(&e2->u.rb, &ubi->free);
ubi->move_from = e1;
ubi->move_to = e2;
spin_unlock(&ubi->wl_lock);
@@ -799,7 +1156,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
scrubbing = 1;
goto out_not_moved;
}
- if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+ if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
err == MOVE_TARGET_RD_ERR) {
/*
* Target PEB had bit-flips or write error - torture it.
@@ -847,7 +1204,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e1, 0);
+ err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e1);
if (e2)
@@ -862,7 +1219,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
e2->pnum, vol_id, lnum);
- err = schedule_erase(ubi, e2, 0);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
@@ -901,7 +1258,7 @@ out_not_moved:
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
- err = schedule_erase(ubi, e2, torture);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
@@ -942,12 +1299,13 @@ out_cancel:
/**
* ensure_wear_leveling - schedule wear-leveling if it is needed.
* @ubi: UBI device description object
+ * @nested: set to non-zero if this function is called from the UBI worker
*
* This function checks if it is time to start wear-leveling and schedules it
* if yes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
-static int ensure_wear_leveling(struct ubi_device *ubi)
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
int err = 0;
struct ubi_wl_entry *e1;
@@ -975,7 +1333,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
* %UBI_WL_THRESHOLD.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
- e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+ e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
@@ -992,8 +1350,12 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
goto out_cancel;
}
+ wrk->anchor = 0;
wrk->func = &wear_leveling_worker;
- schedule_ubi_work(ubi, wrk);
+ if (nested)
+ __schedule_ubi_work(ubi, wrk);
+ else
+ schedule_ubi_work(ubi, wrk);
return err;
out_cancel:
@@ -1004,6 +1366,38 @@ out_unlock:
return err;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+ struct ubi_work *wrk;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->wl_scheduled) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ return -ENOMEM;
+ }
+
+ wrk->anchor = 1;
+ wrk->func = &wear_leveling_worker;
+ schedule_ubi_work(ubi, wrk);
+ return 0;
+}
+#endif
+
/**
* erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
@@ -1019,7 +1413,10 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel)
{
struct ubi_wl_entry *e = wl_wrk->e;
- int pnum = e->pnum, err, need;
+ int pnum = e->pnum;
+ int vol_id = wl_wrk->vol_id;
+ int lnum = wl_wrk->lnum;
+ int err, available_consumed = 0;
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1028,7 +1425,10 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
return 0;
}
- dbg_wl("erase PEB %d EC %d", pnum, e->ec);
+ dbg_wl("erase PEB %d EC %d LEB %d:%d",
+ pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
+
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
@@ -1037,6 +1437,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
spin_unlock(&ubi->wl_lock);
/*
@@ -1046,7 +1447,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
serve_prot_queue(ubi);
/* And take care about wear-leveling */
- err = ensure_wear_leveling(ubi);
+ err = ensure_wear_leveling(ubi, 1);
return err;
}
@@ -1058,7 +1459,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int err1;
/* Re-schedule the LEB for erasure */
- err1 = schedule_erase(ubi, e, 0);
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
if (err1) {
err = err1;
goto out_ro;
@@ -1083,20 +1484,14 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
}
spin_lock(&ubi->volumes_lock);
- need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
- if (need > 0) {
- need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
- ubi->avail_pebs -= need;
- ubi->rsvd_pebs += need;
- ubi->beb_rsvd_pebs += need;
- if (need > 0)
- ubi_msg("reserve more %d PEBs", need);
- }
-
if (ubi->beb_rsvd_pebs == 0) {
- spin_unlock(&ubi->volumes_lock);
- ubi_err("no reserved physical eraseblocks");
- goto out_ro;
+ if (ubi->avail_pebs == 0) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_err("no reserved/available physical eraseblocks");
+ goto out_ro;
+ }
+ ubi->avail_pebs -= 1;
+ available_consumed = 1;
}
spin_unlock(&ubi->volumes_lock);
@@ -1106,19 +1501,36 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
goto out_ro;
spin_lock(&ubi->volumes_lock);
- ubi->beb_rsvd_pebs -= 1;
+ if (ubi->beb_rsvd_pebs > 0) {
+ if (available_consumed) {
+ /*
+ * The amount of reserved PEBs increased since we last
+ * checked.
+ */
+ ubi->avail_pebs += 1;
+ available_consumed = 0;
+ }
+ ubi->beb_rsvd_pebs -= 1;
+ }
ubi->bad_peb_count += 1;
ubi->good_peb_count -= 1;
ubi_calculate_reserved(ubi);
- if (ubi->beb_rsvd_pebs)
+ if (available_consumed)
+ ubi_warn("no PEBs in the reserved pool, used an available PEB");
+ else if (ubi->beb_rsvd_pebs)
ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
else
- ubi_warn("last PEB from the reserved pool was used");
+ ubi_warn("last PEB from the reserve was used");
spin_unlock(&ubi->volumes_lock);
return err;
out_ro:
+ if (available_consumed) {
+ spin_lock(&ubi->volumes_lock);
+ ubi->avail_pebs += 1;
+ spin_unlock(&ubi->volumes_lock);
+ }
ubi_ro_mode(ubi);
return err;
}
@@ -1126,6 +1538,8 @@ out_ro:
/**
* ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
* @ubi: UBI device description object
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
* @pnum: physical eraseblock to return
* @torture: if this physical eraseblock has to be tortured
*
@@ -1134,7 +1548,8 @@ out_ro:
* occurred to this @pnum and it has to be tested. This function returns zero
* in case of success, and a negative error code in case of failure.
*/
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture)
{
int err;
struct ubi_wl_entry *e;
@@ -1176,13 +1591,13 @@ retry:
return 0;
} else {
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->used);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else if (in_wl_tree(e, &ubi->scrub)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->scrub);
+ self_check_in_wl_tree(ubi, e, &ubi->scrub);
rb_erase(&e->u.rb, &ubi->scrub);
} else if (in_wl_tree(e, &ubi->erroneous)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->erroneous);
+ self_check_in_wl_tree(ubi, e, &ubi->erroneous);
rb_erase(&e->u.rb, &ubi->erroneous);
ubi->erroneous_peb_count -= 1;
ubi_assert(ubi->erroneous_peb_count >= 0);
@@ -1200,7 +1615,7 @@ retry:
}
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e, torture);
+ err = schedule_erase(ubi, e, vol_id, lnum, torture);
if (err) {
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->used);
@@ -1224,7 +1639,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
struct ubi_wl_entry *e;
- dbg_msg("schedule PEB %d for scrubbing", pnum);
+ ubi_msg("schedule PEB %d for scrubbing", pnum);
retry:
spin_lock(&ubi->wl_lock);
@@ -1249,7 +1664,7 @@ retry:
}
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->used);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else {
int err;
@@ -1270,29 +1685,60 @@ retry:
* Technically scrubbing is the same as wear-leveling, so it is done
* by the WL worker.
*/
- return ensure_wear_leveling(ubi);
+ return ensure_wear_leveling(ubi, 0);
}
/**
* ubi_wl_flush - flush all pending works.
* @ubi: UBI device description object
+ * @vol_id: the volume id to flush for
+ * @lnum: the logical eraseblock number to flush for
*
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function executes all pending works for a particular volume id /
+ * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
+ * acts as a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
*/
-int ubi_wl_flush(struct ubi_device *ubi)
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
- int err;
+ int err = 0;
+ int found = 1;
/*
* Erase while the pending works queue is not empty, but not more than
* the number of currently pending works.
*/
- dbg_wl("flush (%d pending works)", ubi->works_count);
- while (ubi->works_count) {
- err = do_work(ubi);
- if (err)
- return err;
+ dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
+ vol_id, lnum, ubi->works_count);
+
+ while (found) {
+ struct ubi_work *wrk;
+ found = 0;
+
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ list_for_each_entry(wrk, &ubi->works, list) {
+ if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
+ (lnum == UBI_ALL || wrk->lnum == lnum)) {
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ spin_unlock(&ubi->wl_lock);
+
+ err = wrk->func(ubi, wrk, 0);
+ if (err) {
+ up_read(&ubi->work_sem);
+ return err;
+ }
+
+ spin_lock(&ubi->wl_lock);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
}
/*
@@ -1302,18 +1748,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
down_write(&ubi->work_sem);
up_write(&ubi->work_sem);
- /*
- * And in case last was the WL worker and it canceled the LEB
- * movement, flush again.
- */
- while (ubi->works_count) {
- dbg_wl("flush more (%d pending works)", ubi->works_count);
- err = do_work(ubi);
- if (err)
- return err;
- }
-
- return 0;
+ return err;
}
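An illustrative caller of the reworked interface (sketch only, not part of the patch):

	err = ubi_wl_flush(ubi, vol_id, lnum);		/* flush works for one LEB */
	err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);	/* or flush everything */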
/**
@@ -1422,27 +1857,30 @@ static void cancel_pending(struct ubi_device *ubi)
}
/**
- * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
+ * ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns zero in case of success, and a negative error code in
* case of failure.
*/
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- int err, i;
+ int err, i, reserved_pebs, found_pebs = 0;
struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb, *tmp;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp;
struct ubi_wl_entry *e;
ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
mutex_init(&ubi->move_mutex);
init_rwsem(&ubi->work_sem);
- ubi->max_ec = si->max_ec;
+ ubi->max_ec = ai->max_ec;
INIT_LIST_HEAD(&ubi->works);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
+#endif
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
@@ -1455,48 +1893,59 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
INIT_LIST_HEAD(&ubi->pq[i]);
ubi->pq_head = 0;
- list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
+ list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, 0)) {
+ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
+
+ found_pebs++;
}
- list_for_each_entry(seb, &si->free, u.list) {
+ ubi->free_count = 0;
+ list_for_each_entry(aeb, &ai->free, u.list) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
ubi_assert(e->ec >= 0);
+ ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
+
wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+
ubi->lookuptbl[e->pnum] = e;
+
+ found_pebs++;
}
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
- if (!seb->scrub) {
+
+ if (!aeb->scrub) {
dbg_wl("add PEB %d EC %d to the used tree",
e->pnum, e->ec);
wl_tree_add(e, &ubi->used);
@@ -1505,22 +1954,38 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
e->pnum, e->ec);
wl_tree_add(e, &ubi->scrub);
}
+
+ found_pebs++;
}
}
- if (ubi->avail_pebs < WL_RESERVED_PEBS) {
+ dbg_wl("found %i PEBs", found_pebs);
+
+ if (ubi->fm)
+ ubi_assert(ubi->good_peb_count ==
+ found_pebs + ubi->fm->used_blocks);
+ else
+ ubi_assert(ubi->good_peb_count == found_pebs);
+
+ reserved_pebs = WL_RESERVED_PEBS;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Reserve enough LEBs to store two fastmaps. */
+ reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
+#endif
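To put rough numbers on this reservation (illustrative only, the sizes are assumed): with 128 KiB LEBs and a fastmap occupying two LEBs (ubi->fm_size = 256 KiB), the term adds (256 / 128) * 2 = 4 extra PEBs on top of WL_RESERVED_PEBS, and attaching fails below unless at least that many PEBs are still available.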
+
+ if (ubi->avail_pebs < reserved_pebs) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
- ubi->avail_pebs, WL_RESERVED_PEBS);
+ ubi->avail_pebs, reserved_pebs);
if (ubi->corr_peb_count)
ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
goto out_free;
}
- ubi->avail_pebs -= WL_RESERVED_PEBS;
- ubi->rsvd_pebs += WL_RESERVED_PEBS;
+ ubi->avail_pebs -= reserved_pebs;
+ ubi->rsvd_pebs += reserved_pebs;
/* Schedule wear-leveling if needed */
- err = ensure_wear_leveling(ubi);
+ err = ensure_wear_leveling(ubi, 0);
if (err)
goto out_free;
@@ -1568,10 +2033,8 @@ void ubi_wl_close(struct ubi_device *ubi)
kfree(ubi->lookuptbl);
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
+ * self_check_ec - make sure that the erase counter of a PEB is correct.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @ec: the erase counter to check
@@ -1580,13 +2043,13 @@ void ubi_wl_close(struct ubi_device *ubi)
* is equivalent to @ec, and a negative error code if not or if an error
* occurred.
*/
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
int err;
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1601,10 +2064,10 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
}
read_ec = be64_to_cpu(ec_hdr->ec);
- if (ec != read_ec) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ if (ec != read_ec && read_ec - ec > 1) {
+ ubi_err("self-check failed for PEB %d", pnum);
ubi_err("read EC is %lld, should be %d", read_ec, ec);
- ubi_dbg_dump_stack();
+ dump_stack();
err = 1;
} else
err = 0;
@@ -1615,7 +2078,7 @@ out_free:
}
/**
- * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
* @root: the root of the tree
@@ -1623,37 +2086,36 @@ out_free:
* This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
* is not.
*/
-static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
- struct ubi_wl_entry *e,
- struct rb_root *root)
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root)
{
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
if (in_wl_tree(e, root))
return 0;
- ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
+ ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
e->pnum, e->ec, root);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_in_pq - check if wear-leveling entry is in the protection
+ * self_check_in_pq - check if wear-leveling entry is in the protection
* queue.
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
*
* This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
*/
-static int paranoid_check_in_pq(const struct ubi_device *ubi,
- struct ubi_wl_entry *e)
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
{
struct ubi_wl_entry *p;
int i;
- if (!ubi->dbg->chk_gen)
+ if (!ubi_dbg_chk_gen(ubi))
return 0;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
@@ -1661,10 +2123,8 @@ static int paranoid_check_in_pq(const struct ubi_device *ubi,
if (p == e)
return 0;
- ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
+ ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
e->pnum, e->ec);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EINVAL;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index c74c4b8e71ef..812c5b935363 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2026,7 +2026,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
*/
port_id = fip->port_id;
if (fip->probe_tries)
- port_id = prandom32(&fip->rnd_state) & 0xffff;
+ port_id = prandom_u32_state(&fip->rnd_state) & 0xffff;
else if (!port_id)
port_id = fip->lp->wwpn & 0xffff;
if (!port_id || port_id == 0xffff)
@@ -2051,7 +2051,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
{
fip->probe_tries = 0;
- prandom32_seed(&fip->rnd_state, fip->lp->wwpn);
+ prandom_seed_state(&fip->rnd_state, fip->lp->wwpn);
fcoe_ctlr_vn_restart(fip);
}