path: root/include/linux/mtd/mtd.h
Diffstat (limited to 'include/linux/mtd/mtd.h')
-rw-r--r--  include/linux/mtd/mtd.h  334
1 file changed, 255 insertions, 79 deletions
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 9f5b312af783..1a81fde8f333 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -171,87 +171,60 @@ struct mtd_info {
struct mtd_erase_region_info *eraseregions;
/*
- * Erase is an asynchronous operation. Device drivers are supposed
- * to call instr->callback() whenever the operation completes, even
- * if it completes with a failure.
- * Callers are supposed to pass a callback function and wait for it
- * to be called before writing to the block.
+ * Do not call via these pointers, use corresponding mtd_*()
+ * wrappers instead.
*/
int (*erase) (struct mtd_info *mtd, struct erase_info *instr);
-
- /* This stuff for eXecute-In-Place */
- /* phys is optional and may be set to NULL */
int (*point) (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, void **virt, resource_size_t *phys);
-
- /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+ size_t *retlen, void **virt, resource_size_t *phys);
void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
-
- /* Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
unsigned long len,
unsigned long offset,
unsigned long flags);
-
- /* Backing device capabilities for this device
- * - provides mmap capabilities
- */
- struct backing_dev_info *backing_dev_info;
-
-
- int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
- int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
-
- /* In blackbox flight recorder like scenarios we want to make successful
- writes in interrupt context. panic_write() is only intended to be
- called when its known the kernel is about to panic and we need the
- write to succeed. Since the kernel is not going to be running for much
- longer, this function can break locks and delay to ensure the write
- succeeds (but not sleep). */
-
- int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
-
+ int (*read) (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+ int (*write) (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+ int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
int (*read_oob) (struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops);
int (*write_oob) (struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops);
-
- /*
- * Methods to access the protection register area, present in some
- * flash devices. The user data is one time programmable but the
- * factory data is read only.
- */
- int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
- int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
- int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
- int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
- int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
- int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len);
-
- /* kvec-based read/write methods.
- NB: The 'count' parameter is the number of _vectors_, each of
- which contains an (ofs, len) tuple.
- */
- int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen);
-
- /* Sync */
+ struct mtd_oob_ops *ops);
+ int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+ size_t len);
+ int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf);
+ int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+ size_t len);
+ int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf);
+ int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, u_char *buf);
+ int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len);
+ int (*writev) (struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
void (*sync) (struct mtd_info *mtd);
-
- /* Chip-supported device locking */
int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
-
- /* Power Management functions */
+ int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
+ int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
int (*suspend) (struct mtd_info *mtd);
void (*resume) (struct mtd_info *mtd);
+ /*
+ * If the driver is something smart, like UBI, it may need to maintain
+ * its own reference counting. The below functions are only for driver.
+ */
+ int (*get_device) (struct mtd_info *mtd);
+ void (*put_device) (struct mtd_info *mtd);
- /* Bad block management functions */
- int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
- int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
+ /* Backing device capabilities for this device
+ * - provides mmap capabilities
+ */
+ struct backing_dev_info *backing_dev_info;
struct notifier_block reboot_notifier; /* default mode before reboot */
@@ -265,18 +238,218 @@ struct mtd_info {
struct module *owner;
struct device dev;
int usecount;
-
- /* If the driver is something smart, like UBI, it may need to maintain
- * its own reference counting. The below functions are only for driver.
- * The driver may register its callbacks. These callbacks are not
- * supposed to be called by MTD users */
- int (*get_device) (struct mtd_info *mtd);
- void (*put_device) (struct mtd_info *mtd);
};
-static inline struct mtd_info *dev_to_mtd(struct device *dev)
+/*
+ * Erase is an asynchronous operation. Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return mtd->erase(mtd, instr);
+}
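
For context, a minimal sketch of how a caller can drive a synchronous erase through the new wrapper. The callback/wait-queue pattern mirrors existing users such as mtdchar; the helper names erase_done() and erase_one_block() are hypothetical, and <linux/wait.h> is assumed to be available:

static void erase_done(struct erase_info *instr)
{
        /* The driver calls this on completion; wake the waiting caller. */
        wake_up((wait_queue_head_t *)instr->priv);
}

static int erase_one_block(struct mtd_info *mtd, loff_t ofs)
{
        wait_queue_head_t waitq;
        struct erase_info ei = { };
        int ret;

        init_waitqueue_head(&waitq);
        ei.mtd = mtd;
        ei.addr = ofs;
        ei.len = mtd->erasesize;
        ei.callback = erase_done;
        ei.priv = (unsigned long)&waitq;

        ret = mtd_erase(mtd, &ei);
        if (ret)
                return ret;

        /* Sleep until the callback reports success or failure. */
        wait_event(waitq, ei.state == MTD_ERASE_DONE ||
                          ei.state == MTD_ERASE_FAILED);
        return ei.state == MTD_ERASE_FAILED ? -EIO : 0;
}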
+
+/*
+ * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ *retlen = 0;
+ if (!mtd->point)
+ return -EOPNOTSUPP;
+ return mtd->point(mtd, from, len, retlen, virt, phys);
+}
+
+/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+static inline void mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
- return dev ? dev_get_drvdata(dev) : NULL;
+ return mtd->unpoint(mtd, from, len);
+}
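
A sketch of the usual point/unpoint pairing, falling back to mtd_read() (defined further down) when direct mapping is unsupported or only partial; read_mapped() is a hypothetical helper:

static int read_mapped(struct mtd_info *mtd, loff_t from, size_t len,
                       u_char *buf)
{
        size_t retlen;
        void *virt;
        int ret;

        ret = mtd_point(mtd, from, len, &retlen, &virt, NULL);
        if (!ret && retlen == len) {
                /* Flash is directly addressable (XIP-style); copy and unmap. */
                memcpy(buf, virt, len);
                mtd_unpoint(mtd, from, retlen);
                return 0;
        }
        if (!ret)
                /* Short mapping: give it back before falling back. */
                mtd_unpoint(mtd, from, retlen);
        return mtd_read(mtd, from, len, &retlen, buf);
}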
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags)
+{
+ if (!mtd->get_unmapped_area)
+ return -EOPNOTSUPP;
+ return mtd->get_unmapped_area(mtd, len, offset, flags);
+}
+
+static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ return mtd->read(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->write)
+ return -EROFS;
+ return mtd->write(mtd, to, len, retlen, buf);
+}
+
+/*
+ * In blackbox flight recorder like scenarios we want to make successful writes
+ * in interrupt context. panic_write() is only intended to be called when its
+ * known the kernel is about to panic and we need the write to succeed. Since
+ * the kernel is not going to be running for much longer, this function can
+ * break locks and delay to ensure the write succeeds (but not sleep).
+ */
+static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->panic_write)
+ return -EOPNOTSUPP;
+ return mtd->panic_write(mtd, to, len, retlen, buf);
+}
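
Illustrative only: a flight-recorder style user (mtdoops, for example) would call the wrapper from the panic path roughly as below; record_crash_log() is a hypothetical helper and must not sleep:

static void record_crash_log(struct mtd_info *mtd, loff_t to,
                             const u_char *log, size_t len)
{
        size_t retlen;
        int ret;

        ret = mtd_panic_write(mtd, to, len, &retlen, log);
        if (ret || retlen != len)
                pr_err("crash log write at %llx failed (%d)\n",
                       (unsigned long long)to, ret);
}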
+
+static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->read_oob)
+ return -EOPNOTSUPP;
+ return mtd->read_oob(mtd, from, ops);
+}
+
+static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->write_oob)
+ return -EOPNOTSUPP;
+ return mtd->write_oob(mtd, to, ops);
+}
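
A sketch of reading the free OOB bytes of one page through the wrapper; read_free_oob() is a hypothetical helper, and the mode constant is spelled MTD_OOB_AUTO in trees of this vintage (MTD_OPS_AUTO_OOB after the later rename), so treat that name as an assumption:

static int read_free_oob(struct mtd_info *mtd, loff_t from, u_char *oobbuf)
{
        struct mtd_oob_ops ops = { };

        ops.mode = MTD_OOB_AUTO;        /* free OOB bytes, per the ECC layout */
        ops.ooblen = mtd->oobavail;
        ops.oobbuf = oobbuf;
        ops.datbuf = NULL;              /* OOB only, no main-area data */

        return mtd_read_oob(mtd, from, &ops);
}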
+
+/*
+ * Method to access the protection register area, present in some flash
+ * devices. The user data is one time programmable but the factory data is read
+ * only.
+ */
+static inline int mtd_get_fact_prot_info(struct mtd_info *mtd,
+ struct otp_info *buf, size_t len)
+{
+ if (!mtd->get_fact_prot_info)
+ return -EOPNOTSUPP;
+ return mtd->get_fact_prot_info(mtd, buf, len);
+}
+
+static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->read_fact_prot_reg)
+ return -EOPNOTSUPP;
+ return mtd->read_fact_prot_reg(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_get_user_prot_info(struct mtd_info *mtd,
+ struct otp_info *buf,
+ size_t len)
+{
+ if (!mtd->get_user_prot_info)
+ return -EOPNOTSUPP;
+ return mtd->get_user_prot_info(mtd, buf, len);
+}
+
+static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->read_user_prot_reg)
+ return -EOPNOTSUPP;
+ return mtd->read_user_prot_reg(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->write_user_prot_reg)
+ return -EOPNOTSUPP;
+ return mtd->write_user_prot_reg(mtd, to, len, retlen, buf);
+}
+
+static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
+{
+ if (!mtd->lock_user_prot_reg)
+ return -EOPNOTSUPP;
+ return mtd->lock_user_prot_reg(mtd, from, len);
+}
+
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
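
Here 'count' is the number of vectors, each an (iov_base, iov_len) pair. A minimal gather-write sketch, writing a header and payload in one call (hdr, payload and their lengths are assumed to exist in the caller):

        struct kvec vecs[2];
        size_t retlen;
        int ret;

        vecs[0].iov_base = hdr;
        vecs[0].iov_len  = hdr_len;
        vecs[1].iov_base = payload;
        vecs[1].iov_len  = payload_len;

        ret = mtd_writev(mtd, vecs, 2, to, &retlen);
        if (!ret && retlen != hdr_len + payload_len)
                ret = -EIO;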
+
+static inline void mtd_sync(struct mtd_info *mtd)
+{
+ if (mtd->sync)
+ mtd->sync(mtd);
+}
+
+/* Chip-supported device locking */
+static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->lock)
+ return -EOPNOTSUPP;
+ return mtd->lock(mtd, ofs, len);
+}
+
+static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->unlock)
+ return -EOPNOTSUPP;
+ return mtd->unlock(mtd, ofs, len);
+}
+
+static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->is_locked)
+ return -EOPNOTSUPP;
+ return mtd->is_locked(mtd, ofs, len);
+}
+
+static inline int mtd_suspend(struct mtd_info *mtd)
+{
+ if (!mtd->suspend)
+ return -EOPNOTSUPP;
+ return mtd->suspend(mtd);
+}
+
+static inline void mtd_resume(struct mtd_info *mtd)
+{
+ if (mtd->resume)
+ mtd->resume(mtd);
+}
+
+static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (!mtd->block_isbad)
+ return -EOPNOTSUPP;
+ return mtd->block_isbad(mtd, ofs);
+}
+
+static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (!mtd->block_markbad)
+ return -EOPNOTSUPP;
+ return mtd->block_markbad(mtd, ofs);
}
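
A sketch of the usual partition scan that skips bad eraseblocks; note that with these wrappers a device without bad-block support returns -EOPNOTSUPP, so the check is gated on mtd_can_have_bb() (added below). scan_good_blocks() is a hypothetical helper:

static int scan_good_blocks(struct mtd_info *mtd)
{
        loff_t ofs;
        int ret;

        for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
                if (mtd_can_have_bb(mtd)) {
                        ret = mtd_block_isbad(mtd, ofs);
                        if (ret < 0)
                                return ret;     /* I/O error while checking */
                        if (ret)
                                continue;       /* bad eraseblock, skip it */
                }
                /* ... use the good eraseblock at 'ofs' ... */
        }
        return 0;
}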
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -309,6 +482,16 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
return do_div(sz, mtd->writesize);
}
+static inline int mtd_has_oob(const struct mtd_info *mtd)
+{
+ return mtd->read_oob && mtd->write_oob;
+}
+
+static inline int mtd_can_have_bb(const struct mtd_info *mtd)
+{
+ return !!mtd->block_isbad;
+}
+
/* Kernel-side ioctl definitions */
struct mtd_partition;
@@ -338,13 +521,6 @@ struct mtd_notifier {
extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
-
-int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen);
-
-int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
- unsigned long count, loff_t from, size_t *retlen);
-
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
void mtd_erase_callback(struct erase_info *instr);