Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial
[safe/jmp/linux-2.6] / drivers / mtd / nand / nandsim.c
index 638e6c2..cd0711b 100644
@@ -21,8 +21,6 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
- *
- * $Id: nandsim.c,v 1.8 2005/03/19 15:33:56 dedekind Exp $
  */
 
 #include <linux/init.h>
@@ -30,6 +28,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
+#include <asm/div64.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/mtd/partitions.h>
 #include <linux/delay.h>
 #include <linux/list.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
 
 /* Default simulator parameters values */
 #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE)  || \
@@ -93,6 +96,14 @@ static uint log            = CONFIG_NANDSIM_LOG;
 static uint dbg            = CONFIG_NANDSIM_DBG;
 static unsigned long parts[MAX_MTD_DEVICES];
 static unsigned int parts_num;
+static char *badblocks = NULL;
+static char *weakblocks = NULL;
+static char *weakpages = NULL;
+static unsigned int bitflips = 0;
+static char *gravepages = NULL;
+static unsigned int rptwear = 0;
+static unsigned int overridesize = 0;
+static char *cache_file = NULL;
 
 module_param(first_id_byte,  uint, 0400);
 module_param(second_id_byte, uint, 0400);
@@ -108,12 +119,20 @@ module_param(do_delays,      uint, 0400);
 module_param(log,            uint, 0400);
 module_param(dbg,            uint, 0400);
 module_param_array(parts, ulong, &parts_num, 0400);
-
-MODULE_PARM_DESC(first_id_byte,  "The fist byte returned by NAND Flash 'read ID' command (manufaturer ID)");
+module_param(badblocks,      charp, 0400);
+module_param(weakblocks,     charp, 0400);
+module_param(weakpages,      charp, 0400);
+module_param(bitflips,       uint, 0400);
+module_param(gravepages,     charp, 0400);
+module_param(rptwear,        uint, 0400);
+module_param(overridesize,   uint, 0400);
+module_param(cache_file,     charp, 0400);
+
+MODULE_PARM_DESC(first_id_byte,  "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
 MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
 MODULE_PARM_DESC(third_id_byte,  "The third byte returned by NAND Flash 'read ID' command");
 MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
-MODULE_PARM_DESC(access_delay,   "Initial page access delay (microiseconds)");
+MODULE_PARM_DESC(access_delay,   "Initial page access delay (microseconds)");
 MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds");
 MODULE_PARM_DESC(erase_delay,    "Sector erase delay (milliseconds)");
 MODULE_PARM_DESC(output_cycle,   "Word output (from flash) time (nanodeconds)");
@@ -123,6 +142,23 @@ MODULE_PARM_DESC(do_delays,      "Simulate NAND delays using busy-waits if not z
 MODULE_PARM_DESC(log,            "Perform logging if not zero");
 MODULE_PARM_DESC(dbg,            "Output debug information if not zero");
 MODULE_PARM_DESC(parts,          "Partition sizes (in erase blocks) separated by commas");
+/* Page and erase block positions for the following parameters are independent of any partitions */
+MODULE_PARM_DESC(badblocks,      "Erase blocks that are initially marked bad, separated by commas");
+MODULE_PARM_DESC(weakblocks,     "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
+                                " separated by commas e.g. 113:2 means eb 113"
+                                " can be erased only twice before failing");
+MODULE_PARM_DESC(weakpages,      "Weak pages [: maximum writes (defaults to 3)]"
+                                " separated by commas e.g. 1401:2 means page 1401"
+                                " can be written only twice before failing");
+MODULE_PARM_DESC(bitflips,       "Maximum number of random bit flips per page (zero by default)");
+MODULE_PARM_DESC(gravepages,     "Pages that lose data [: maximum reads (defaults to 3)]"
+                                " separated by commas e.g. 1401:2 means page 1401"
+                                " can be read only twice before failing");
+MODULE_PARM_DESC(rptwear,        "Number of erases between wear reports, if not zero");
+MODULE_PARM_DESC(overridesize,   "Specifies the NAND Flash size overriding the ID bytes. "
+                                "The size is specified in erase blocks and as the exponent of a power of two"
+                                " e.g. 5 means a size of 32 erase blocks");
+MODULE_PARM_DESC(cache_file,     "File to use to cache nand pages instead of memory");
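+/*
+ * Illustrative example of the fault-injection parameters: weakblocks="113:2,120"
+ * makes erase block 113 fail after 2 erases and block 120 after the default 3;
+ * gravepages="1401:2" makes page 1401 lose its data after 2 reads; bitflips=4
+ * occasionally injects up to 4 random bit flips into a read page; and
+ * cache_file="/tmp/nandsim.bin" (any path) backs the simulated flash with a
+ * file instead of memory.
+ */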
 
 /* The largest possible page size */
 #define NS_LARGEST_PAGE_SIZE   2048
@@ -139,6 +175,8 @@ MODULE_PARM_DESC(parts,          "Partition sizes (in erase blocks) separated by
        do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
 #define NS_ERR(args...) \
        do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
+#define NS_INFO(args...) \
+       do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)
 
 /* Busy-wait delay macros (microseconds, milliseconds) */
 #define NS_UDELAY(us) \
@@ -175,13 +213,16 @@ MODULE_PARM_DESC(parts,          "Partition sizes (in erase blocks) separated by
 #define STATE_CMD_READID       0x0000000A /* read ID */
 #define STATE_CMD_ERASE2       0x0000000B /* sector erase second command */
 #define STATE_CMD_RESET        0x0000000C /* reset */
+#define STATE_CMD_RNDOUT       0x0000000D /* random output command */
+#define STATE_CMD_RNDOUTSTART  0x0000000E /* random output start command */
 #define STATE_CMD_MASK         0x0000000F /* command states mask */
 
-/* After an addres is input, the simulator goes to one of these states */
+/* After an address is input, the simulator goes to one of these states */
 #define STATE_ADDR_PAGE        0x00000010 /* full (row, column) address is accepted */
 #define STATE_ADDR_SEC         0x00000020 /* sector address was accepted */
-#define STATE_ADDR_ZERO        0x00000030 /* one byte zero address was accepted */
-#define STATE_ADDR_MASK        0x00000030 /* address states mask */
+#define STATE_ADDR_COLUMN      0x00000030 /* column address was accepted */
+#define STATE_ADDR_ZERO        0x00000040 /* one byte zero address was accepted */
+#define STATE_ADDR_MASK        0x00000070 /* address states mask */
 
 /* Durind data input/output the simulator is in these states */
 #define STATE_DATAIN           0x00000100 /* waiting for data input */
@@ -208,7 +249,7 @@ MODULE_PARM_DESC(parts,          "Partition sizes (in erase blocks) separated by
 #define ACTION_OOBOFF    0x00600000 /* add to address OOB offset */
 #define ACTION_MASK      0x00700000 /* action mask */
 
-#define NS_OPER_NUM      12 /* Number of operations supported by the simulator */
+#define NS_OPER_NUM      13 /* Number of operations supported by the simulator */
 #define NS_OPER_STATES   6  /* Maximum number of states in operation */
 
 #define OPT_ANY          0xFFFFFFFF /* any chip supports this operation */
@@ -231,6 +272,9 @@ MODULE_PARM_DESC(parts,          "Partition sizes (in erase blocks) separated by
  */
 #define NS_MAX_PREVSTATES 1
 
+/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
+#define NS_MAX_HELD_PAGES 16
+
 /*
  * A union to represent flash memory contents and flash buffer.
  */
@@ -260,16 +304,19 @@ struct nandsim {
        /* The simulated NAND flash pages array */
        union ns_mem *pages;
 
+       /* Slab allocator for nand pages */
+       struct kmem_cache *nand_pages_slab;
+
        /* Internal buffer of page + OOB size bytes */
        union ns_mem buf;
 
        /* NAND flash "geometry" */
        struct nandsin_geometry {
-               uint32_t totsz;     /* total flash size, bytes */
+               uint64_t totsz;     /* total flash size, bytes */
                uint32_t secsz;     /* flash sector (erase block) size, bytes */
                uint pgsz;          /* NAND flash page size, bytes */
                uint oobsz;         /* page OOB area size, bytes */
-               uint32_t totszoob;  /* total flash size including OOB, bytes */
+               uint64_t totszoob;  /* total flash size including OOB, bytes */
                uint pgszoob;       /* page size including OOB , bytes*/
                uint secszoob;      /* sector size including OOB, bytes */
                uint pgnum;         /* total number of pages */
@@ -300,6 +347,13 @@ struct nandsim {
                 int ale; /* address Latch Enable */
                 int wp;  /* write Protect */
         } lines;
+
+       /* Fields needed when using a cache file */
+       struct file *cfile; /* Open file */
+       unsigned char *pages_written; /* Which pages have been written */
+       void *file_buf;
+       struct page *held_pages[NS_MAX_HELD_PAGES];
+       int held_cnt;
 };
 
 /*
@@ -341,34 +395,113 @@ static struct nandsim_operations {
        {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
        /* Large page devices read page */
        {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
-                              STATE_DATAOUT, STATE_READY}}
+                              STATE_DATAOUT, STATE_READY}},
+       /* Large page devices random page read */
+       {OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
+                              STATE_DATAOUT, STATE_READY}},
+};
+
+struct weak_block {
+       struct list_head list;
+       unsigned int erase_block_no;
+       unsigned int max_erases;
+       unsigned int erases_done;
 };
 
+static LIST_HEAD(weak_blocks);
+
+struct weak_page {
+       struct list_head list;
+       unsigned int page_no;
+       unsigned int max_writes;
+       unsigned int writes_done;
+};
+
+static LIST_HEAD(weak_pages);
+
+struct grave_page {
+       struct list_head list;
+       unsigned int page_no;
+       unsigned int max_reads;
+       unsigned int reads_done;
+};
+
+static LIST_HEAD(grave_pages);
+
+static unsigned long *erase_block_wear = NULL;
+static unsigned int wear_eb_count = 0;
+static unsigned long total_wear = 0;
+static unsigned int rptwear_cnt = 0;
+
 /* MTD structure for NAND controller */
 static struct mtd_info *nsmtd;
 
 static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
 
 /*
- * Allocate array of page pointers and initialize the array to NULL
- * pointers.
+ * Allocate array of page pointers, create a slab cache for nand pages
+ * and initialize the array with NULL pointers.
  *
  * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
  */
 static int alloc_device(struct nandsim *ns)
 {
-       int i;
+       struct file *cfile;
+       int i, err;
+
+       if (cache_file) {
+               cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
+               if (IS_ERR(cfile))
+                       return PTR_ERR(cfile);
+               if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
+                       NS_ERR("alloc_device: cache file not readable\n");
+                       err = -EINVAL;
+                       goto err_close;
+               }
+               if (!cfile->f_op->write && !cfile->f_op->aio_write) {
+                       NS_ERR("alloc_device: cache file not writeable\n");
+                       err = -EINVAL;
+                       goto err_close;
+               }
+               ns->pages_written = vmalloc(ns->geom.pgnum);
+               if (!ns->pages_written) {
+                       NS_ERR("alloc_device: unable to allocate pages written array\n");
+                       err = -ENOMEM;
+                       goto err_close;
+               }
+               ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+               if (!ns->file_buf) {
+                       NS_ERR("alloc_device: unable to allocate file buf\n");
+                       err = -ENOMEM;
+                       goto err_free;
+               }
+               ns->cfile = cfile;
+               memset(ns->pages_written, 0, ns->geom.pgnum);
+               return 0;
+       }
 
        ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
        if (!ns->pages) {
-               NS_ERR("alloc_map: unable to allocate page array\n");
+               NS_ERR("alloc_device: unable to allocate page array\n");
                return -ENOMEM;
        }
        for (i = 0; i < ns->geom.pgnum; i++) {
                ns->pages[i].byte = NULL;
        }
+       ns->nand_pages_slab = kmem_cache_create("nandsim",
+                                               ns->geom.pgszoob, 0, 0, NULL);
+       if (!ns->nand_pages_slab) {
+               NS_ERR("cache_create: unable to create kmem_cache\n");
+               return -ENOMEM;
+       }
 
        return 0;
+
+err_free:
+       vfree(ns->pages_written);
+err_close:
+       filp_close(cfile, NULL);
+       return err;
 }
 
 /*
@@ -378,11 +511,20 @@ static void free_device(struct nandsim *ns)
 {
        int i;
 
+       if (ns->cfile) {
+               kfree(ns->file_buf);
+               vfree(ns->pages_written);
+               filp_close(ns->cfile, NULL);
+               return;
+       }
+
        if (ns->pages) {
                for (i = 0; i < ns->geom.pgnum; i++) {
                        if (ns->pages[i].byte)
-                               kfree(ns->pages[i].byte);
+                               kmem_cache_free(ns->nand_pages_slab,
+                                               ns->pages[i].byte);
                }
+               kmem_cache_destroy(ns->nand_pages_slab);
                vfree(ns->pages);
        }
 }
@@ -394,6 +536,12 @@ static char *get_partition_name(int i)
        return kstrdup(buf, GFP_KERNEL);
 }
 
+static uint64_t divide(uint64_t n, uint32_t d)
+{
+       do_div(n, d);
+       return n;
+}
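+/*
+ * Note: do_div() divides the 64-bit dividend in place and returns the 32-bit
+ * remainder, so the wrapper above discards the remainder and hands back the
+ * quotient needed by the 64-bit geometry calculations.
+ */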
+
 /*
  * Initialize the nandsim structure.
  *
@@ -404,8 +552,8 @@ static int init_nandsim(struct mtd_info *mtd)
        struct nand_chip *chip = (struct nand_chip *)mtd->priv;
        struct nandsim   *ns   = (struct nandsim *)(chip->priv);
        int i, ret = 0;
-       u_int32_t remains;
-       u_int32_t next_offset;
+       uint64_t remains;
+       uint64_t next_offset;
 
        if (NS_IS_INITIALIZED(ns)) {
                NS_ERR("init_nandsim: nandsim is already initialized\n");
@@ -422,8 +570,8 @@ static int init_nandsim(struct mtd_info *mtd)
        ns->geom.oobsz    = mtd->oobsize;
        ns->geom.secsz    = mtd->erasesize;
        ns->geom.pgszoob  = ns->geom.pgsz + ns->geom.oobsz;
-       ns->geom.pgnum    = ns->geom.totsz / ns->geom.pgsz;
-       ns->geom.totszoob = ns->geom.totsz + ns->geom.pgnum * ns->geom.oobsz;
+       ns->geom.pgnum    = divide(ns->geom.totsz, ns->geom.pgsz);
+       ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
        ns->geom.secshift = ffs(ns->geom.secsz) - 1;
        ns->geom.pgshift  = chip->page_shift;
        ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
@@ -446,7 +594,7 @@ static int init_nandsim(struct mtd_info *mtd)
        }
 
        if (ns->options & OPT_SMALLPAGE) {
-               if (ns->geom.totsz < (64 << 20)) {
+               if (ns->geom.totsz <= (32 << 20)) {
                        ns->geom.pgaddrbytes  = 3;
                        ns->geom.secaddrbytes = 2;
                } else {
@@ -472,15 +620,16 @@ static int init_nandsim(struct mtd_info *mtd)
        remains = ns->geom.totsz;
        next_offset = 0;
        for (i = 0; i < parts_num; ++i) {
-               unsigned long part = parts[i];
-               if (!part || part > remains / ns->geom.secsz) {
+               uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
+
+               if (!part_sz || part_sz > remains) {
                        NS_ERR("bad partition size.\n");
                        ret = -EINVAL;
                        goto error;
                }
                ns->partitions[i].name   = get_partition_name(i);
                ns->partitions[i].offset = next_offset;
-               ns->partitions[i].size   = part * ns->geom.secsz;
+               ns->partitions[i].size   = part_sz;
                next_offset += ns->partitions[i].size;
                remains -= ns->partitions[i].size;
        }
@@ -508,7 +657,8 @@ static int init_nandsim(struct mtd_info *mtd)
        if (ns->busw == 16)
                NS_WARN("16-bit flashes support wasn't tested\n");
 
-       printk("flash size: %u MiB\n",          ns->geom.totsz >> 20);
+       printk("flash size: %llu MiB\n",
+                       (unsigned long long)ns->geom.totsz >> 20);
        printk("page size: %u bytes\n",         ns->geom.pgsz);
        printk("OOB area size: %u bytes\n",     ns->geom.oobsz);
        printk("sector size: %u KiB\n",         ns->geom.secsz >> 10);
@@ -517,8 +667,9 @@ static int init_nandsim(struct mtd_info *mtd)
        printk("bus width: %u\n",               ns->busw);
        printk("bits in sector size: %u\n",     ns->geom.secshift);
        printk("bits in page size: %u\n",       ns->geom.pgshift);
-       printk("bits in OOB size: %u\n",        ns->geom.oobshift);
-       printk("flash size with OOB: %u KiB\n", ns->geom.totszoob >> 10);
+       printk("bits in OOB size: %u\n",        ns->geom.oobshift);
+       printk("flash size with OOB: %llu KiB\n",
+                       (unsigned long long)ns->geom.totszoob >> 10);
        printk("page address bytes: %u\n",      ns->geom.pgaddrbytes);
        printk("sector address bytes: %u\n",    ns->geom.secaddrbytes);
        printk("options: %#x\n",                ns->options);
@@ -555,6 +706,287 @@ static void free_nandsim(struct nandsim *ns)
        return;
 }
 
+static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
+{
+       char *w;
+       int zero_ok;
+       unsigned int erase_block_no;
+       loff_t offset;
+
+       if (!badblocks)
+               return 0;
+       w = badblocks;
+       do {
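+               /*
+                * simple_strtoul() returns 0 both on a parse error and for a
+                * literal "0", so remember whether the token began with '0'.
+                */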
+               zero_ok = (*w == '0' ? 1 : 0);
+               erase_block_no = simple_strtoul(w, &w, 0);
+               if (!zero_ok && !erase_block_no) {
+                       NS_ERR("invalid badblocks.\n");
+                       return -EINVAL;
+               }
+               offset = (loff_t)erase_block_no * ns->geom.secsz;
+               if (mtd->block_markbad(mtd, offset)) {
+                       NS_ERR("invalid badblocks.\n");
+                       return -EINVAL;
+               }
+               if (*w == ',')
+                       w += 1;
+       } while (*w);
+       return 0;
+}
+
+static int parse_weakblocks(void)
+{
+       char *w;
+       int zero_ok;
+       unsigned int erase_block_no;
+       unsigned int max_erases;
+       struct weak_block *wb;
+
+       if (!weakblocks)
+               return 0;
+       w = weakblocks;
+       do {
+               zero_ok = (*w == '0' ? 1 : 0);
+               erase_block_no = simple_strtoul(w, &w, 0);
+               if (!zero_ok && !erase_block_no) {
+                       NS_ERR("invalid weakblocks.\n");
+                       return -EINVAL;
+               }
+               max_erases = 3;
+               if (*w == ':') {
+                       w += 1;
+                       max_erases = simple_strtoul(w, &w, 0);
+               }
+               if (*w == ',')
+                       w += 1;
+               wb = kzalloc(sizeof(*wb), GFP_KERNEL);
+               if (!wb) {
+                       NS_ERR("unable to allocate memory.\n");
+                       return -ENOMEM;
+               }
+               wb->erase_block_no = erase_block_no;
+               wb->max_erases = max_erases;
+               list_add(&wb->list, &weak_blocks);
+       } while (*w);
+       return 0;
+}
+
+static int erase_error(unsigned int erase_block_no)
+{
+       struct weak_block *wb;
+
+       list_for_each_entry(wb, &weak_blocks, list)
+               if (wb->erase_block_no == erase_block_no) {
+                       if (wb->erases_done >= wb->max_erases)
+                               return 1;
+                       wb->erases_done += 1;
+                       return 0;
+               }
+       return 0;
+}
+
+static int parse_weakpages(void)
+{
+       char *w;
+       int zero_ok;
+       unsigned int page_no;
+       unsigned int max_writes;
+       struct weak_page *wp;
+
+       if (!weakpages)
+               return 0;
+       w = weakpages;
+       do {
+               zero_ok = (*w == '0' ? 1 : 0);
+               page_no = simple_strtoul(w, &w, 0);
+               if (!zero_ok && !page_no) {
+                       NS_ERR("invalid weakpages.\n");
+                       return -EINVAL;
+               }
+               max_writes = 3;
+               if (*w == ':') {
+                       w += 1;
+                       max_writes = simple_strtoul(w, &w, 0);
+               }
+               if (*w == ',')
+                       w += 1;
+               wp = kzalloc(sizeof(*wp), GFP_KERNEL);
+               if (!wp) {
+                       NS_ERR("unable to allocate memory.\n");
+                       return -ENOMEM;
+               }
+               wp->page_no = page_no;
+               wp->max_writes = max_writes;
+               list_add(&wp->list, &weak_pages);
+       } while (*w);
+       return 0;
+}
+
+static int write_error(unsigned int page_no)
+{
+       struct weak_page *wp;
+
+       list_for_each_entry(wp, &weak_pages, list)
+               if (wp->page_no == page_no) {
+                       if (wp->writes_done >= wp->max_writes)
+                               return 1;
+                       wp->writes_done += 1;
+                       return 0;
+               }
+       return 0;
+}
+
+static int parse_gravepages(void)
+{
+       char *g;
+       int zero_ok;
+       unsigned int page_no;
+       unsigned int max_reads;
+       struct grave_page *gp;
+
+       if (!gravepages)
+               return 0;
+       g = gravepages;
+       do {
+               zero_ok = (*g == '0' ? 1 : 0);
+               page_no = simple_strtoul(g, &g, 0);
+               if (!zero_ok && !page_no) {
+                       NS_ERR("invalid gravepages.\n");
+                       return -EINVAL;
+               }
+               max_reads = 3;
+               if (*g == ':') {
+                       g += 1;
+                       max_reads = simple_strtoul(g, &g, 0);
+               }
+               if (*g == ',')
+                       g += 1;
+               gp = kzalloc(sizeof(*gp), GFP_KERNEL);
+               if (!gp) {
+                       NS_ERR("unable to allocate memory.\n");
+                       return -ENOMEM;
+               }
+               gp->page_no = page_no;
+               gp->max_reads = max_reads;
+               list_add(&gp->list, &grave_pages);
+       } while (*g);
+       return 0;
+}
+
+static int read_error(unsigned int page_no)
+{
+       struct grave_page *gp;
+
+       list_for_each_entry(gp, &grave_pages, list)
+               if (gp->page_no == page_no) {
+                       if (gp->reads_done >= gp->max_reads)
+                               return 1;
+                       gp->reads_done += 1;
+                       return 0;
+               }
+       return 0;
+}
+
+static void free_lists(void)
+{
+       struct list_head *pos, *n;
+       list_for_each_safe(pos, n, &weak_blocks) {
+               list_del(pos);
+               kfree(list_entry(pos, struct weak_block, list));
+       }
+       list_for_each_safe(pos, n, &weak_pages) {
+               list_del(pos);
+               kfree(list_entry(pos, struct weak_page, list));
+       }
+       list_for_each_safe(pos, n, &grave_pages) {
+               list_del(pos);
+               kfree(list_entry(pos, struct grave_page, list));
+       }
+       kfree(erase_block_wear);
+}
+
+static int setup_wear_reporting(struct mtd_info *mtd)
+{
+       size_t mem;
+
+       if (!rptwear)
+               return 0;
+       wear_eb_count = divide(mtd->size, mtd->erasesize);
+       mem = wear_eb_count * sizeof(unsigned long);
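+       /* Guard against overflow when computing the allocation size */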
+       if (mem / sizeof(unsigned long) != wear_eb_count) {
+               NS_ERR("Too many erase blocks for wear reporting\n");
+               return -ENOMEM;
+       }
+       erase_block_wear = kzalloc(mem, GFP_KERNEL);
+       if (!erase_block_wear) {
+               NS_ERR("unable to allocate erase block wear array\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static void update_wear(unsigned int erase_block_no)
+{
+       unsigned long wmin = -1, wmax = 0, avg;
+       unsigned long deciles[10], decile_max[10], tot = 0;
+       unsigned int i;
+
+       if (!erase_block_wear)
+               return;
+       total_wear += 1;
+       if (total_wear == 0)
+               NS_ERR("Erase counter total overflow\n");
+       erase_block_wear[erase_block_no] += 1;
+       if (erase_block_wear[erase_block_no] == 0)
+               NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
+       rptwear_cnt += 1;
+       if (rptwear_cnt < rptwear)
+               return;
+       rptwear_cnt = 0;
+       /* Calc wear stats */
+       for (i = 0; i < wear_eb_count; ++i) {
+               unsigned long wear = erase_block_wear[i];
+               if (wear < wmin)
+                       wmin = wear;
+               if (wear > wmax)
+                       wmax = wear;
+               tot += wear;
+       }
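+       /* Decile boundaries step evenly up to wmax, rounded to the nearest count */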
+       for (i = 0; i < 9; ++i) {
+               deciles[i] = 0;
+               decile_max[i] = (wmax * (i + 1) + 5) / 10;
+       }
+       deciles[9] = 0;
+       decile_max[9] = wmax;
+       for (i = 0; i < wear_eb_count; ++i) {
+               int d;
+               unsigned long wear = erase_block_wear[i];
+               for (d = 0; d < 10; ++d)
+                       if (wear <= decile_max[d]) {
+                               deciles[d] += 1;
+                               break;
+                       }
+       }
+       avg = tot / wear_eb_count;
+       /* Output wear report */
+       NS_INFO("*** Wear Report ***\n");
+       NS_INFO("Total numbers of erases:  %lu\n", tot);
+       NS_INFO("Number of erase blocks:   %u\n", wear_eb_count);
+       NS_INFO("Average number of erases: %lu\n", avg);
+       NS_INFO("Maximum number of erases: %lu\n", wmax);
+       NS_INFO("Minimum number of erases: %lu\n", wmin);
+       for (i = 0; i < 10; ++i) {
+               unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
+               if (from > decile_max[i])
+                       continue;
+               NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
+                       from,
+                       decile_max[i],
+                       deciles[i]);
+       }
+       NS_INFO("*** End of Wear Report ***\n");
+}
+
 /*
  * Returns the string representation of 'state' state.
  */
@@ -585,12 +1017,18 @@ static char *get_state_name(uint32_t state)
                        return "STATE_CMD_ERASE2";
                case STATE_CMD_RESET:
                        return "STATE_CMD_RESET";
+               case STATE_CMD_RNDOUT:
+                       return "STATE_CMD_RNDOUT";
+               case STATE_CMD_RNDOUTSTART:
+                       return "STATE_CMD_RNDOUTSTART";
                case STATE_ADDR_PAGE:
                        return "STATE_ADDR_PAGE";
                case STATE_ADDR_SEC:
                        return "STATE_ADDR_SEC";
                case STATE_ADDR_ZERO:
                        return "STATE_ADDR_ZERO";
+               case STATE_ADDR_COLUMN:
+                       return "STATE_ADDR_COLUMN";
                case STATE_DATAIN:
                        return "STATE_DATAIN";
                case STATE_DATAOUT:
@@ -621,6 +1059,7 @@ static int check_command(int cmd)
        switch (cmd) {
 
        case NAND_CMD_READ0:
+       case NAND_CMD_READ1:
        case NAND_CMD_READSTART:
        case NAND_CMD_PAGEPROG:
        case NAND_CMD_READOOB:
@@ -630,7 +1069,8 @@ static int check_command(int cmd)
        case NAND_CMD_READID:
        case NAND_CMD_ERASE2:
        case NAND_CMD_RESET:
-       case NAND_CMD_READ1:
+       case NAND_CMD_RNDOUT:
+       case NAND_CMD_RNDOUTSTART:
                return 0;
 
        case NAND_CMD_STATUS_MULTI:
@@ -669,6 +1109,10 @@ static uint32_t get_state_by_command(unsigned command)
                        return STATE_CMD_ERASE2;
                case NAND_CMD_RESET:
                        return STATE_CMD_RESET;
+               case NAND_CMD_RNDOUT:
+                       return STATE_CMD_RNDOUT;
+               case NAND_CMD_RNDOUTSTART:
+                       return STATE_CMD_RNDOUTSTART;
        }
 
        NS_ERR("get_state_by_command: unknown command, BUG\n");
@@ -839,6 +1283,97 @@ static int find_operation(struct nandsim *ns, uint32_t flag)
        return -1;
 }
 
+static void put_pages(struct nandsim *ns)
+{
+       int i;
+
+       for (i = 0; i < ns->held_cnt; i++)
+               page_cache_release(ns->held_pages[i]);
+}
+
+/* Get page cache pages in advance to provide NOFS memory allocation */
+static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
+{
+       pgoff_t index, start_index, end_index;
+       struct page *page;
+       struct address_space *mapping = file->f_mapping;
+
+       start_index = pos >> PAGE_CACHE_SHIFT;
+       end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+       if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
+               return -EINVAL;
+       ns->held_cnt = 0;
+       for (index = start_index; index <= end_index; index++) {
+               page = find_get_page(mapping, index);
+               if (page == NULL) {
+                       page = find_or_create_page(mapping, index, GFP_NOFS);
+                       if (page == NULL) {
+                               write_inode_now(mapping->host, 1);
+                               page = find_or_create_page(mapping, index, GFP_NOFS);
+                       }
+                       if (page == NULL) {
+                               put_pages(ns);
+                               return -ENOMEM;
+                       }
+                       unlock_page(page);
+               }
+               ns->held_pages[ns->held_cnt++] = page;
+       }
+       return 0;
+}
+
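+/*
+ * PF_MEMALLOC lets the file I/O below allocate from memory reserves instead
+ * of recursing into reclaim (and possibly into a filesystem that is itself
+ * writing through nandsim); it is set only for the duration of the transfer
+ * and restored afterwards if it was not already set.
+ */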
+static int set_memalloc(void)
+{
+       if (current->flags & PF_MEMALLOC)
+               return 0;
+       current->flags |= PF_MEMALLOC;
+       return 1;
+}
+
+static void clear_memalloc(int memalloc)
+{
+       if (memalloc)
+               current->flags &= ~PF_MEMALLOC;
+}
+
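+/*
+ * vfs_read()/vfs_write() expect a user-space pointer, so the address limit is
+ * temporarily lifted with set_fs(get_ds()) to allow passing the kernel buffer;
+ * get_pages() has already created the needed page cache pages with GFP_NOFS,
+ * so the transfer itself does not allocate them with GFP_KERNEL.
+ */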
+static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+{
+       mm_segment_t old_fs;
+       ssize_t tx;
+       int err, memalloc;
+
+       err = get_pages(ns, file, count, *pos);
+       if (err)
+               return err;
+       old_fs = get_fs();
+       set_fs(get_ds());
+       memalloc = set_memalloc();
+       tx = vfs_read(file, (char __user *)buf, count, pos);
+       clear_memalloc(memalloc);
+       set_fs(old_fs);
+       put_pages(ns);
+       return tx;
+}
+
+static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t *pos)
+{
+       mm_segment_t old_fs;
+       ssize_t tx;
+       int err, memalloc;
+
+       err = get_pages(ns, file, count, *pos);
+       if (err)
+               return err;
+       old_fs = get_fs();
+       set_fs(get_ds());
+       memalloc = set_memalloc();
+       tx = vfs_write(file, (char __user *)buf, count, pos);
+       clear_memalloc(memalloc);
+       set_fs(old_fs);
+       put_pages(ns);
+       return tx;
+}
+
 /*
  * Returns a pointer to the current page.
  */
@@ -855,6 +1390,38 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
        return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
 }
 
+int do_read_error(struct nandsim *ns, int num)
+{
+       unsigned int page_no = ns->regs.row;
+
+       if (read_error(page_no)) {
+               int i;
+               memset(ns->buf.byte, 0xFF, num);
+               for (i = 0; i < num; ++i)
+                       ns->buf.byte[i] = random32();
+               NS_WARN("simulating read error in page %u\n", page_no);
+               return 1;
+       }
+       return 0;
+}
+
+void do_bit_flips(struct nandsim *ns, int num)
+{
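+       /* random32() < (1 << 22) is true with probability 2^22/2^32, i.e. roughly 1 in 1024 */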
+       if (bitflips && random32() < (1 << 22)) {
+               int flips = 1;
+               if (bitflips > 1)
+                       flips = (random32() % (int) bitflips) + 1;
+               while (flips--) {
+                       int pos = random32() % (num * 8);
+                       ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
+                       NS_WARN("read_page: flipping bit %d in page %d "
+                               "reading from %d ecc: corrected=%u failed=%u\n",
+                               pos, ns->regs.row, ns->regs.column + ns->regs.off,
+                               nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
+               }
+       }
+}
+
 /*
  * Fill the NAND buffer with data read from the specified page.
  */
@@ -862,6 +1429,29 @@ static void read_page(struct nandsim *ns, int num)
 {
        union ns_mem *mypage;
 
+       if (ns->cfile) {
+               if (!ns->pages_written[ns->regs.row]) {
+                       NS_DBG("read_page: page %d not written\n", ns->regs.row);
+                       memset(ns->buf.byte, 0xFF, num);
+               } else {
+                       loff_t pos;
+                       ssize_t tx;
+
+                       NS_DBG("read_page: page %d written, reading from %d\n",
+                               ns->regs.row, ns->regs.column + ns->regs.off);
+                       if (do_read_error(ns, num))
+                               return;
+                       pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
+                       tx = read_file(ns, ns->cfile, ns->buf.byte, num, &pos);
+                       if (tx != num) {
+                               NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+                               return;
+                       }
+                       do_bit_flips(ns, num);
+               }
+               return;
+       }
+
        mypage = NS_GET_PAGE(ns);
        if (mypage->byte == NULL) {
                NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
@@ -869,7 +1459,10 @@ static void read_page(struct nandsim *ns, int num)
        } else {
                NS_DBG("read_page: page %d allocated, reading from %d\n",
                        ns->regs.row, ns->regs.column + ns->regs.off);
+               if (do_read_error(ns, num))
+                       return;
                memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
+               do_bit_flips(ns, num);
        }
 }
 
@@ -881,11 +1474,20 @@ static void erase_sector(struct nandsim *ns)
        union ns_mem *mypage;
        int i;
 
+       if (ns->cfile) {
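+               /*
+                * With a cache file an erase only forgets the pages; they read
+                * back as 0xFF until they are programmed again.
+                */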
+               for (i = 0; i < ns->geom.pgsec; i++)
+                       if (ns->pages_written[ns->regs.row + i]) {
+                               NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
+                               ns->pages_written[ns->regs.row + i] = 0;
+                       }
+               return;
+       }
+
        mypage = NS_GET_PAGE(ns);
        for (i = 0; i < ns->geom.pgsec; i++) {
                if (mypage->byte != NULL) {
                        NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
-                       kfree(mypage->byte);
+                       kmem_cache_free(ns->nand_pages_slab, mypage->byte);
                        mypage->byte = NULL;
                }
                mypage++;
@@ -901,10 +1503,57 @@ static int prog_page(struct nandsim *ns, int num)
        union ns_mem *mypage;
        u_char *pg_off;
 
+       if (ns->cfile) {
+               loff_t off, pos;
+               ssize_t tx;
+               int all;
+
+               NS_DBG("prog_page: writing page %d\n", ns->regs.row);
+               pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
+               off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
+               if (!ns->pages_written[ns->regs.row]) {
+                       all = 1;
+                       memset(ns->file_buf, 0xff, ns->geom.pgszoob);
+               } else {
+                       all = 0;
+                       pos = off;
+                       tx = read_file(ns, ns->cfile, pg_off, num, &pos);
+                       if (tx != num) {
+                               NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+                               return -1;
+                       }
+               }
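+               /* NAND programming can only clear bits, so AND the new data into what is stored */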
+               for (i = 0; i < num; i++)
+                       pg_off[i] &= ns->buf.byte[i];
+               if (all) {
+                       pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+                       tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, &pos);
+                       if (tx != ns->geom.pgszoob) {
+                               NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+                               return -1;
+                       }
+                       ns->pages_written[ns->regs.row] = 1;
+               } else {
+                       pos = off;
+                       tx = write_file(ns, ns->cfile, pg_off, num, &pos);
+                       if (tx != num) {
+                               NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+                               return -1;
+                       }
+               }
+               return 0;
+       }
+
        mypage = NS_GET_PAGE(ns);
        if (mypage->byte == NULL) {
                NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
-               mypage->byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+               /*
+                * We allocate memory with GFP_NOFS because a flash FS may
+                * utilize this. If it is holding an FS lock, then gets here,
+                * then kernel memory alloc runs writeback which goes to the FS
+                * again and deadlocks. This was seen in practice.
+                */
+               mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
                if (mypage->byte == NULL) {
                        NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
                        return -1;
@@ -928,6 +1577,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
 {
        int num;
        int busdiv = ns->busw == 8 ? 1 : 2;
+       unsigned int erase_block_no, page_no;
 
        action &= ACTION_MASK;
 
@@ -987,14 +1637,24 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
                                8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
                ns->regs.column = 0;
 
+               erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
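+               /* regs.row is a page number; the shift above converts it to an erase block number */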
+
                NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
                                ns->regs.row, NS_RAW_OFFSET(ns));
-               NS_LOG("erase sector %d\n", ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift));
+               NS_LOG("erase sector %u\n", erase_block_no);
 
                erase_sector(ns);
 
                NS_MDELAY(erase_delay);
 
+               if (erase_block_wear)
+                       update_wear(erase_block_no);
+
+               if (erase_error(erase_block_no)) {
+                       NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
+                       return -1;
+               }
+
                break;
 
        case ACTION_PRGPAGE:
@@ -1017,6 +1677,8 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
                if (prog_page(ns, num) == -1)
                        return -1;
 
+               page_no = ns->regs.row;
+
                NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
                        num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
                NS_LOG("programm page %d\n", ns->regs.row);
@@ -1024,6 +1686,11 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
                NS_UDELAY(programm_delay);
                NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
 
+               if (write_error(page_no)) {
+                       NS_WARN("simulating write failure in page %u\n", page_no);
+                       return -1;
+               }
+
                break;
 
        case ACTION_ZEROOFF:
@@ -1184,6 +1851,11 @@ static void switch_state(struct nandsim *ns)
                                ns->regs.num = 1;
                                break;
 
+                       case STATE_ADDR_COLUMN:
+                               /* Column address is always 2 bytes */
+                               ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
+                               break;
+
                        default:
                                NS_ERR("switch_state: BUG! unknown address state\n");
                }
@@ -1295,34 +1967,38 @@ static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
                        return;
                }
 
-               /*
-                * Chip might still be in STATE_DATAOUT
-                * (if OPT_AUTOINCR feature is supported), STATE_DATAOUT_STATUS or
-                * STATE_DATAOUT_STATUS_M state. If so, switch state.
-                */
+               /* Check that the command byte is correct */
+               if (check_command(byte)) {
+                       NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
+                       return;
+               }
+
                if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
                        || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
-                       || ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT))
+                       || NS_STATE(ns->state) == STATE_DATAOUT) {
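+                       /*
+                        * Remember the row so that a RNDOUT (change read column)
+                        * command keeps reading from the same page after the
+                        * state switch.
+                        */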
+                       int row = ns->regs.row;
+
                        switch_state(ns);
+                       if (byte == NAND_CMD_RNDOUT)
+                               ns->regs.row = row;
+               }
 
                /* Check if chip is expecting command */
                if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
-                       /*
-                        * We are in situation when something else (not command)
-                        * was expected but command was input. In this case ignore
-                        * previous command(s)/state(s) and accept the last one.
-                        */
-                       NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
-                               "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+                       /* Do not warn if only 2 id bytes are read */
+                       if (!(ns->regs.command == NAND_CMD_READID &&
+                           NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
+                               /*
+                                * We are in situation when something else (not command)
+                                * was expected but command was input. In this case ignore
+                                * previous command(s)/state(s) and accept the last one.
+                                */
+                               NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
+                                       "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+                       }
                        switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
                }
 
-               /* Check that the command byte is correct */
-               if (check_command(byte)) {
-                       NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
-                       return;
-               }
-
                NS_DBG("command byte corresponding to %s state accepted\n",
                        get_state_name(get_state_by_command(byte)));
                ns->regs.command = byte;
@@ -1578,6 +2254,8 @@ static int __init ns_init_module(void)
        chip->verify_buf = ns_nand_verify_buf;
        chip->read_word  = ns_nand_read_word;
        chip->ecc.mode   = NAND_ECC_SOFT;
+       /*
+        * The NAND_SKIP_BBTSCAN option is necessary for the 'overridesize'
+        * and 'badblocks' parameters to work.
+        */
        chip->options   |= NAND_SKIP_BBTSCAN;
 
        /*
@@ -1602,6 +2280,15 @@ static int __init ns_init_module(void)
 
        nsmtd->owner = THIS_MODULE;
 
+       if ((retval = parse_weakblocks()) != 0)
+               goto error;
+
+       if ((retval = parse_weakpages()) != 0)
+               goto error;
+
+       if ((retval = parse_gravepages()) != 0)
+               goto error;
+
        if ((retval = nand_scan(nsmtd, 1)) != 0) {
                NS_ERR("can't register NAND Simulator\n");
                if (retval > 0)
@@ -1609,9 +2296,28 @@ static int __init ns_init_module(void)
                goto error;
        }
 
+       if (overridesize) {
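+               /* e.g. a 16KiB erase block and overridesize=5 give a 32-erase-block, 512KiB chip */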
+               uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
+               if (new_size >> overridesize != nsmtd->erasesize) {
+                       NS_ERR("overridesize is too big\n");
+                       goto err_exit;
+               }
+               /* N.B. This relies on nand_scan not doing anything with the size before we change it */
+               nsmtd->size = new_size;
+               chip->chipsize = new_size;
+               chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
+               chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+       }
+
+       if ((retval = setup_wear_reporting(nsmtd)) != 0)
+               goto err_exit;
+
        if ((retval = init_nandsim(nsmtd)) != 0)
                goto err_exit;
 
+       if ((retval = parse_badblocks(nand, nsmtd)) != 0)
+               goto err_exit;
+
        if ((retval = nand_default_bbt(nsmtd)) != 0)
                goto err_exit;
 
@@ -1628,6 +2334,7 @@ err_exit:
                kfree(nand->partitions[i].name);
 error:
        kfree(nsmtd);
+       free_lists();
 
        return retval;
 }
@@ -1647,6 +2354,7 @@ static void __exit ns_cleanup_module(void)
        for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
                kfree(ns->partitions[i].name);
        kfree(nsmtd);        /* Free other structures */
+       free_lists();
 }
 
 module_exit(ns_cleanup_module);