[JFFS2] Switch to using an array of jffs2_raw_node_refs instead of a list.
fs/jffs2/wbuf.c
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright (C) 2001-2003 Red Hat, Inc.
5  * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
6  *
7  * Created by David Woodhouse <dwmw2@infradead.org>
8  * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9  *
10  * For licensing information, see the file 'LICENCE' in this directory.
11  *
12  * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
13  *
14  */
15
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/crc32.h>
20 #include <linux/mtd/nand.h>
21 #include <linux/jiffies.h>
22
23 #include "nodelist.h"
24
25 /* For testing write failures */
26 #undef BREAKME
27 #undef BREAKMEHEADER
28
29 #ifdef BREAKME
30 static unsigned char *brokenbuf;
31 #endif
32
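/* Round an address down to the start of its write-buffer page, and get the
   offset of an address within its write-buffer page, respectively. */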
33 #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
34 #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
35
36 /* max. erase failures before we mark a block bad */
37 #define MAX_ERASE_FAILURES      2
38
39 struct jffs2_inodirty {
40         uint32_t ino;
41         struct jffs2_inodirty *next;
42 };
43
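/* Sentinel for c->wbuf_inodes: if allocating a jffs2_inodirty fails, the
   list is replaced by this marker and every inode is treated as having
   data pending in the write buffer. */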
44 static struct jffs2_inodirty inodirty_nomem;
45
46 static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
47 {
48         struct jffs2_inodirty *this = c->wbuf_inodes;
49
50         /* If a malloc failed, consider _everything_ dirty */
51         if (this == &inodirty_nomem)
52                 return 1;
53
54         /* If ino == 0, _any_ non-GC writes mean 'yes' */
55         if (this && !ino)
56                 return 1;
57
58         /* Look to see if the inode in question is pending in the wbuf */
59         while (this) {
60                 if (this->ino == ino)
61                         return 1;
62                 this = this->next;
63         }
64         return 0;
65 }
66
67 static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
68 {
69         struct jffs2_inodirty *this;
70
71         this = c->wbuf_inodes;
72
73         if (this != &inodirty_nomem) {
74                 while (this) {
75                         struct jffs2_inodirty *next = this->next;
76                         kfree(this);
77                         this = next;
78                 }
79         }
80         c->wbuf_inodes = NULL;
81 }
82
83 static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
84 {
85         struct jffs2_inodirty *new;
86
87         /* Mark the superblock dirty so that kupdated will flush... */
88         jffs2_erase_pending_trigger(c);
89
90         if (jffs2_wbuf_pending_for_ino(c, ino))
91                 return;
92
93         new = kmalloc(sizeof(*new), GFP_KERNEL);
94         if (!new) {
95                 D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
96                 jffs2_clear_wbuf_ino_list(c);
97                 c->wbuf_inodes = &inodirty_nomem;
98                 return;
99         }
100         new->ino = ino;
101         new->next = c->wbuf_inodes;
102         c->wbuf_inodes = new;
103         return;
104 }
105
106 static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
107 {
108         struct list_head *this, *next;
109         static int n;
110
111         if (list_empty(&c->erasable_pending_wbuf_list))
112                 return;
113
114         list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
115                 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
116
117                 D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
118                 list_del(this);
119                 if ((jiffies + (n++)) & 127) {
120                         /* Most of the time, we just erase it immediately. Otherwise we
121                            spend ages scanning it on mount, etc. */
122                         D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
123                         list_add_tail(&jeb->list, &c->erase_pending_list);
124                         c->nr_erasing_blocks++;
125                         jffs2_erase_pending_trigger(c);
126                 } else {
127                         /* Sometimes, however, we leave it elsewhere so it doesn't get
128                            immediately reused, and we spread the load a bit. */
129                         D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
130                         list_add_tail(&jeb->list, &c->erasable_list);
131                 }
132         }
133 }
134
135 #define REFILE_NOTEMPTY 0
136 #define REFILE_ANYWAY   1
137
138 static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
139 {
140         D1(printk("About to refile bad block at %08x\n", jeb->offset));
141
142         /* File the existing block on the bad_used_list.... */
143         if (c->nextblock == jeb)
144                 c->nextblock = NULL;
145         else /* Not sure this should ever happen... need more coffee */
146                 list_del(&jeb->list);
147         if (jeb->first_node) {
148                 D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
149                 list_add(&jeb->list, &c->bad_used_list);
150         } else {
151                 BUG_ON(allow_empty == REFILE_NOTEMPTY);
152                 /* It has to have had some nodes or we couldn't be here */
153                 D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
154                 list_add(&jeb->list, &c->erase_pending_list);
155                 c->nr_erasing_blocks++;
156                 jffs2_erase_pending_trigger(c);
157         }
158
159         if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
160                 uint32_t oldfree = jeb->free_size;
161
162                 jffs2_link_node_ref(c, jeb, 
163                                     (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
164                                     oldfree, NULL);
165                 /* convert to wasted */
166                 c->wasted_size += oldfree;
167                 jeb->wasted_size += oldfree;
168                 c->dirty_size -= oldfree;
169                 jeb->dirty_size -= oldfree;
170         }
171
172         jffs2_dbg_dump_block_lists_nolock(c);
173         jffs2_dbg_acct_sanity_check_nolock(c,jeb);
174         jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
175 }
176
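/* Return a pointer to the ->raw member of whichever in-core structure
   (full_dnode or full_dirent) currently points at 'raw', so the caller can
   repoint it at the replacement node ref. Returns NULL for node types which
   keep no such in-core pointer. */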
177 static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
178                                                             struct jffs2_inode_info *f,
179                                                             struct jffs2_raw_node_ref *raw,
180                                                             union jffs2_node_union *node)
181 {
182         struct jffs2_node_frag *frag;
183         struct jffs2_full_dirent *fd;
184
185         dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
186                     node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
187
188         BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
189                je16_to_cpu(node->u.magic) != 0);
190
191         switch (je16_to_cpu(node->u.nodetype)) {
192         case JFFS2_NODETYPE_INODE:
193                 frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
194                 BUG_ON(!frag);
195                 /* Find a frag which refers to the full_dnode we want to modify */
196                 while (!frag->node || frag->node->raw != raw) {
197                         frag = frag_next(frag);
198                         BUG_ON(!frag);
199                 }
200                 dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
201                 return &frag->node->raw;
202                 break;
203
204         case JFFS2_NODETYPE_DIRENT:
205                 for (fd = f->dents; fd; fd = fd->next) {
206                         if (fd->raw == raw) {
207                                 dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
208                                 return &fd->raw;
209                         }
210                 }
211                 BUG();
212         default:
213                 dbg_noderef("Don't care about replacing raw for nodetype %x\n",
214                             je16_to_cpu(node->u.nodetype));
215                 break;
216         }
217         return NULL;
218 }
219
220 /* Recover from failure to write wbuf. Recover the nodes up to the
221  * wbuf, not the one which we were starting to try to write. */
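/* In outline: refile the failing block, find the first non-obsolete node
 * which is not already safely on flash, read back any of its data written
 * before the current wbuf, reserve space in a fresh block, write the
 * recovered data there, and then move the affected jffs2_raw_node_refs
 * (and any in-core pointers to them) over to the new block. */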
222
223 static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
224 {
225         struct jffs2_eraseblock *jeb, *new_jeb;
226         struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
227         size_t retlen;
228         int ret;
229         int nr_refile = 0;
230         unsigned char *buf;
231         uint32_t start, end, ofs, len;
232
233         jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
234
235         spin_lock(&c->erase_completion_lock);
236         jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
237         spin_unlock(&c->erase_completion_lock);
238
239         BUG_ON(!ref_obsolete(jeb->last_node));
240
241         /* Find the first node to be recovered, by skipping over every
242            node which ends before the wbuf starts, or which is obsolete. */
243         for (next = raw = jeb->first_node; next; raw = next) {
244                 next = ref_next(raw);
245
246                 if (ref_obsolete(raw) || 
247                     (next && ref_offset(next) <= c->wbuf_ofs)) {
248                         dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
249                                     ref_offset(raw), ref_flags(raw),
250                                     (ref_offset(raw) + ref_totlen(c, jeb, raw)),
251                                     c->wbuf_ofs);
252                         continue;
253                 }
254                 dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
255                             ref_offset(raw), ref_flags(raw),
256                             (ref_offset(raw) + ref_totlen(c, jeb, raw)));
257
258                 first_raw = raw;
259                 break;
260         }
261
262         if (!first_raw) {
263                 /* All nodes were obsolete. Nothing to recover. */
264                 D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
265                 c->wbuf_len = 0;
266                 return;
267         }
268
269         start = ref_offset(first_raw);
270         end = ref_offset(jeb->last_node);
271         nr_refile = 1;
272
273         /* Count the number of refs which need to be copied */
274         while ((raw = ref_next(raw)) != jeb->last_node)
275                 nr_refile++;
276
277         dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
278                     start, end, end - start, nr_refile);
279
280         buf = NULL;
281         if (start < c->wbuf_ofs) {
282                 /* First affected node was already partially written.
283                  * Attempt to reread the old data into our buffer. */
284
285                 buf = kmalloc(end - start, GFP_KERNEL);
286                 if (!buf) {
287                         printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
288
289                         goto read_failed;
290                 }
291
292                 /* Do the read... */
293                 ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
294
295                 if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
296                         /* ECC recovered */
297                         ret = 0;
298                 }
299                 if (ret || retlen != c->wbuf_ofs - start) {
300                         printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
301
302                         kfree(buf);
303                         buf = NULL;
304                 read_failed:
305                         first_raw = ref_next(first_raw);
306                         nr_refile--;
307                         while (first_raw && ref_obsolete(first_raw)) {
308                                 first_raw = ref_next(first_raw);
309                                 nr_refile--;
310                         }
311
312                         /* If this was the only node to be recovered, give up */
313                         if (!first_raw) {
314                                 c->wbuf_len = 0;
315                                 return;
316                         }
317
318                         /* It wasn't. Go on and try to recover nodes complete in the wbuf */
319                         start = ref_offset(first_raw);
320                         dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
321                                     start, end, end - start, nr_refile);
322
323                 } else {
324                         /* Read succeeded. Copy the remaining data from the wbuf */
325                         memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
326                 }
327         }
328         /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
329            Either 'buf' contains the data, or we find it in the wbuf */
330
331         /* ... and get an allocation of space from a shiny new block instead */
332         ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
333         if (ret) {
334                 printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
335                 kfree(buf);
336                 return;
337         }
338
339         ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
340         if (ret) {
341                 printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
342                 kfree(buf);
343                 return;
344         }
345
346         ofs = write_ofs(c);
347
348         if (end-start >= c->wbuf_pagesize) {
349                 /* Need to do another write immediately, but it's possible
350                    that this is just because the wbuf itself is completely
351                    full, and there's nothing earlier read back from the
352                    flash. Hence 'buf' isn't necessarily what we're writing
353                    from. */
354                 unsigned char *rewrite_buf = buf?:c->wbuf;
355                 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
356
357                 D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
358                           towrite, ofs));
359
360 #ifdef BREAKMEHEADER
361                 static int breakme;
362                 if (breakme++ == 20) {
363                         printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
364                         breakme = 0;
365                         c->mtd->write(c->mtd, ofs, towrite, &retlen,
366                                       brokenbuf);
367                         ret = -EIO;
368                 } else
369 #endif
370                         ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
371                                             rewrite_buf);
372
373                 if (ret || retlen != towrite) {
374                         /* Argh. We tried. Really we did. */
375                         printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
376                         kfree(buf);
377
378                         if (retlen)
379                                 jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
380
381                         return;
382                 }
383                 printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
384
385                 c->wbuf_len = (end - start) - towrite;
386                 c->wbuf_ofs = ofs + towrite;
387                 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
388                 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
389         } else {
390                 /* OK, now we're left with the dregs in whichever buffer we're using */
391                 if (buf) {
392                         memcpy(c->wbuf, buf, end-start);
393                 } else {
394                         memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
395                 }
396                 c->wbuf_ofs = ofs;
397                 c->wbuf_len = end - start;
398         }
399
400         /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
401         new_jeb = &c->blocks[ofs / c->sector_size];
402
403         spin_lock(&c->erase_completion_lock);
404         for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
405                 uint32_t rawlen = ref_totlen(c, jeb, raw);
406                 struct jffs2_inode_cache *ic;
407                 struct jffs2_raw_node_ref *new_ref;
408                 struct jffs2_raw_node_ref **adjust_ref = NULL;
409                 struct jffs2_inode_info *f = NULL;
410
411                 D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
412                           rawlen, ref_offset(raw), ref_flags(raw), ofs));
413
414                 ic = jffs2_raw_ref_to_ic(raw);
415
416                 /* Ick. This XATTR mess should be fixed shortly... */
417                 if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
418                         struct jffs2_xattr_datum *xd = (void *)ic;
419                         BUG_ON(xd->node != raw);
420                         adjust_ref = &xd->node;
421                         raw->next_in_ino = NULL;
422                         ic = NULL;
423                 } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
424                         struct jffs2_xattr_ref *xr = (void *)ic;
425                         BUG_ON(xr->node != raw);
426                         adjust_ref = &xr->node;
427                         raw->next_in_ino = NULL;
428                         ic = NULL;
429                 } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
430                         struct jffs2_raw_node_ref **p = &ic->nodes;
431
432                         /* Remove the old node from the per-inode list */
433                         while (*p && *p != (void *)ic) {
434                                 if (*p == raw) {
435                                         (*p) = (raw->next_in_ino);
436                                         raw->next_in_ino = NULL;
437                                         break;
438                                 }
439                                 p = &((*p)->next_in_ino);
440                         }
441
442                         if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
443                                 /* If it's an in-core inode, then we have to adjust any
444                                    full_dirent or full_dnode structure to point to the
445                                    new version instead of the old */
446                                 f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
447                                 if (IS_ERR(f)) {
448                                         /* Should never happen; it _must_ be present */
449                                         JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
450                                                     ic->ino, PTR_ERR(f));
451                                         BUG();
452                                 }
453                                 /* We don't lock f->sem. There's a number of ways we could
454                                    end up in here with it already being locked, and nobody's
455                                    going to modify it on us anyway because we hold the
456                                    alloc_sem. We're only changing one ->raw pointer too,
457                                    which we can get away with without upsetting readers. */
458                                 adjust_ref = jffs2_incore_replace_raw(c, f, raw,
459                                                                       (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
460                         } else if (unlikely(ic->state != INO_STATE_PRESENT &&
461                                             ic->state != INO_STATE_CHECKEDABSENT &&
462                                             ic->state != INO_STATE_GC)) {
463                                 JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
464                                 BUG();
465                         }
466                 }
467
468                 new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
469
470                 if (adjust_ref) {
471                         BUG_ON(*adjust_ref != raw);
472                         *adjust_ref = new_ref;
473                 }
474                 if (f)
475                         jffs2_gc_release_inode(c, f);
476
477                 if (!ref_obsolete(raw)) {
478                         jeb->dirty_size += rawlen;
479                         jeb->used_size  -= rawlen;
480                         c->dirty_size += rawlen;
481                         c->used_size -= rawlen;
482                         raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
483                         BUG_ON(raw->next_in_ino);
484                 }
485                 ofs += rawlen;
486         }
487
488         kfree(buf);
489
490         /* Fix up the original jeb now it's on the bad_list */
491         if (first_raw == jeb->first_node) {
492                 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
493                 list_del(&jeb->list);
494                 list_add(&jeb->list, &c->erase_pending_list);
495                 c->nr_erasing_blocks++;
496                 jffs2_erase_pending_trigger(c);
497         }
498
499         jffs2_dbg_acct_sanity_check_nolock(c, jeb);
500         jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
501
502         jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
503         jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
504
505         spin_unlock(&c->erase_completion_lock);
506
507         D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));
508
509 }
510
511 /* Meaning of pad argument:
512    0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
513    1: Pad, do not adjust nextblock free_size
514    2: Pad, adjust nextblock free_size
515 */
516 #define NOPAD           0
517 #define PAD_NOACCOUNT   1
518 #define PAD_ACCOUNTING  2
519
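/* Flush the write buffer out to flash. Expects c->alloc_sem to be held and
   the caller to hold c->wbuf_sem for writing. If 'pad' is set, the rest of
   the page is zero-filled and, where there is room, a padding node is
   written so the space is accounted for. A failed or short write is handed
   to jffs2_wbuf_recover(). */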
520 static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
521 {
522         struct jffs2_eraseblock *wbuf_jeb;
523         int ret;
524         size_t retlen;
525
526         /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
527            del_timer() the timer we never initialised. */
528         if (!jffs2_is_writebuffered(c))
529                 return 0;
530
531         if (!down_trylock(&c->alloc_sem)) {
532                 up(&c->alloc_sem);
533                 printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
534                 BUG();
535         }
536
537         if (!c->wbuf_len)       /* already checked c->wbuf above */
538                 return 0;
539
540         wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
541         if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
542                 return -ENOMEM;
543
544         /* Claim the remaining space on the page.
545            This happens if we have a change to a new block,
546            or if fsync forces us to flush the writebuffer.
547            If we have a switch to the next page, we will not have
548            enough remaining space for this.
549         */
550         if (pad) {
551                 c->wbuf_len = PAD(c->wbuf_len);
552 
553                 /* Pad with JFFS2_DIRTY_BITMASK initially. This helps out ECC'd NOR
554                    with 8-byte page size */
555                 memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
556 
557                 if (c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
558                         struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
559                         padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
560                         padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
561                         padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
562                         padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
563                 }
564         }
565         /* else jffs2_flash_writev has actually filled in the rest of the
566            buffer for us, and will deal with the node refs etc. later. */
567
568 #ifdef BREAKME
569         static int breakme;
570         if (breakme++ == 20) {
571                 printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
572                 breakme = 0;
573                 c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
574                               brokenbuf);
575                 ret = -EIO;
576         } else
577 #endif
578
579                 ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
580
581         if (ret || retlen != c->wbuf_pagesize) {
582                 if (ret)
583                         printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
584                 else {
585                         printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
586                                 retlen, c->wbuf_pagesize);
587                         ret = -EIO;
588                 }
589
590                 jffs2_wbuf_recover(c);
591
592                 return ret;
593         }
594
595         /* Adjust free size of the block if we padded. */
596         if (pad) {
597                 uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
598
599                 D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
600                           (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));
601
602                 /* wbuf_pagesize - wbuf_len is the amount of space that's to be
603                    padded. If there is less free space in the block than that,
604                    something screwed up */
605                 if (wbuf_jeb->free_size < waste) {
606                         printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
607                                c->wbuf_ofs, c->wbuf_len, waste);
608                         printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
609                                wbuf_jeb->offset, wbuf_jeb->free_size);
610                         BUG();
611                 }
612
613                 spin_lock(&c->erase_completion_lock);
614
615                 jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
616                 /* FIXME: that made it count as dirty. Convert to wasted */
617                 wbuf_jeb->dirty_size -= waste;
618                 c->dirty_size -= waste;
619                 wbuf_jeb->wasted_size += waste;
620                 c->wasted_size += waste;
621         } else
622                 spin_lock(&c->erase_completion_lock);
623
624         /* Stick any now-obsoleted blocks on the erase_pending_list */
625         jffs2_refile_wbuf_blocks(c);
626         jffs2_clear_wbuf_ino_list(c);
627         spin_unlock(&c->erase_completion_lock);
628
629         memset(c->wbuf,0xff,c->wbuf_pagesize);
630         /* adjust write buffer offset, else we get a non-contiguous write bug */
631         c->wbuf_ofs += c->wbuf_pagesize;
632         c->wbuf_len = 0;
633         return 0;
634 }
635
636 /* Trigger garbage collection to flush the write-buffer.
637    If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
638    outstanding. If ino arg non-zero, do it only if a write for the
639    given inode is outstanding. */
640 int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
641 {
642         uint32_t old_wbuf_ofs;
643         uint32_t old_wbuf_len;
644         int ret = 0;
645
646         D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
647
648         if (!c->wbuf)
649                 return 0;
650
651         down(&c->alloc_sem);
652         if (!jffs2_wbuf_pending_for_ino(c, ino)) {
653                 D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
654                 up(&c->alloc_sem);
655                 return 0;
656         }
657
658         old_wbuf_ofs = c->wbuf_ofs;
659         old_wbuf_len = c->wbuf_len;
660
661         if (c->unchecked_size) {
662                 /* GC won't make any progress for a while */
663                 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
664                 down_write(&c->wbuf_sem);
665                 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
666                 /* retry flushing wbuf in case jffs2_wbuf_recover
667                    left some data in the wbuf */
668                 if (ret)
669                         ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
670                 up_write(&c->wbuf_sem);
671         } else while (old_wbuf_len &&
672                       old_wbuf_ofs == c->wbuf_ofs) {
673
674                 up(&c->alloc_sem);
675
676                 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
677
678                 ret = jffs2_garbage_collect_pass(c);
679                 if (ret) {
680                         /* GC failed. Flush it with padding instead */
681                         down(&c->alloc_sem);
682                         down_write(&c->wbuf_sem);
683                         ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
684                         /* retry flushing wbuf in case jffs2_wbuf_recover
685                            left some data in the wbuf */
686                         if (ret)
687                                 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
688                         up_write(&c->wbuf_sem);
689                         break;
690                 }
691                 down(&c->alloc_sem);
692         }
693
694         D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
695
696         up(&c->alloc_sem);
697         return ret;
698 }
699
700 /* Pad write-buffer to end and write it, wasting space. */
701 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
702 {
703         int ret;
704
705         if (!c->wbuf)
706                 return 0;
707
708         down_write(&c->wbuf_sem);
709         ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
710         /* retry - maybe wbuf recover left some data in wbuf. */
711         if (ret)
712                 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
713         up_write(&c->wbuf_sem);
714
715         return ret;
716 }
717
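/* Copy as much of 'buf' as fits into the write buffer and return the number
   of bytes taken. If the wbuf is empty and the request covers at least a
   whole page, return 0 so the caller can write the data directly instead. */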
718 static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
719                               size_t len)
720 {
721         if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
722                 return 0;
723
724         if (len > (c->wbuf_pagesize - c->wbuf_len))
725                 len = c->wbuf_pagesize - c->wbuf_len;
726         memcpy(c->wbuf + c->wbuf_len, buf, len);
727         c->wbuf_len += (uint32_t) len;
728         return len;
729 }
730
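/* Write a set of kvecs out through the write buffer. Data is staged in the
   wbuf, which is flushed whenever it fills a complete page; any whole pages
   remaining in a vector are written straight to flash and the tail goes
   back into the wbuf. */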
731 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
732                        unsigned long count, loff_t to, size_t *retlen,
733                        uint32_t ino)
734 {
735         struct jffs2_eraseblock *jeb;
736         size_t wbuf_retlen, donelen = 0;
737         uint32_t outvec_to = to;
738         int ret, invec;
739
740         /* If not writebuffered flash, don't bother */
741         if (!jffs2_is_writebuffered(c))
742                 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
743
744         down_write(&c->wbuf_sem);
745
746         /* If wbuf_ofs is not initialized, set it to target address */
747         if (c->wbuf_ofs == 0xFFFFFFFF) {
748                 c->wbuf_ofs = PAGE_DIV(to);
749                 c->wbuf_len = PAGE_MOD(to);
750                 memset(c->wbuf,0xff,c->wbuf_pagesize);
751         }
752
753         /*
754          * Sanity checks on target address.  It's permitted to write
755          * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
756          * write at the beginning of a new erase block. Anything else,
757          * and you die.  New block starts at xxx000c (0-b = block
758          * header)
759          */
760         if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
761                 /* It's a write to a new block */
762                 if (c->wbuf_len) {
763                         D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
764                                   "causes flush of wbuf at 0x%08x\n",
765                                   (unsigned long)to, c->wbuf_ofs));
766                         ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
767                         if (ret)
768                                 goto outerr;
769                 }
770                 /* set pointer to new block */
771                 c->wbuf_ofs = PAGE_DIV(to);
772                 c->wbuf_len = PAGE_MOD(to);
773         }
774
775         if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
776                 /* We're not writing immediately after the writebuffer. Bad. */
777                 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
778                        "to %08lx\n", (unsigned long)to);
779                 if (c->wbuf_len)
780                         printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
781                                c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
782                 BUG();
783         }
784
785         /* adjust alignment offset */
786         if (c->wbuf_len != PAGE_MOD(to)) {
787                 c->wbuf_len = PAGE_MOD(to);
788                 /* take care of alignment to next page */
789                 if (!c->wbuf_len) {
790                         c->wbuf_len = c->wbuf_pagesize;
791                         ret = __jffs2_flush_wbuf(c, NOPAD);
792                         if (ret)
793                                 goto outerr;
794                 }
795         }
796
797         for (invec = 0; invec < count; invec++) {
798                 int vlen = invecs[invec].iov_len;
799                 uint8_t *v = invecs[invec].iov_base;
800
801                 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
802
803                 if (c->wbuf_len == c->wbuf_pagesize) {
804                         ret = __jffs2_flush_wbuf(c, NOPAD);
805                         if (ret)
806                                 goto outerr;
807                 }
808                 vlen -= wbuf_retlen;
809                 outvec_to += wbuf_retlen;
810                 donelen += wbuf_retlen;
811                 v += wbuf_retlen;
812
813                 if (vlen >= c->wbuf_pagesize) {
814                         ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
815                                             &wbuf_retlen, v);
816                         if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
817                                 goto outfile;
818
819                         vlen -= wbuf_retlen;
820                         outvec_to += wbuf_retlen;
821                         c->wbuf_ofs = outvec_to;
822                         donelen += wbuf_retlen;
823                         v += wbuf_retlen;
824                 }
825
826                 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
827                 if (c->wbuf_len == c->wbuf_pagesize) {
828                         ret = __jffs2_flush_wbuf(c, NOPAD);
829                         if (ret)
830                                 goto outerr;
831                 }
832
833                 outvec_to += wbuf_retlen;
834                 donelen += wbuf_retlen;
835         }
836
837         /*
838          * If there's a remainder in the wbuf and it's a non-GC write,
839          * remember that the wbuf affects this ino
840          */
841         *retlen = donelen;
842
843         if (jffs2_sum_active()) {
844                 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
845                 if (res)
846                         return res;
847         }
848
849         if (c->wbuf_len && ino)
850                 jffs2_wbuf_dirties_inode(c, ino);
851
852         ret = 0;
853         up_write(&c->wbuf_sem);
854         return ret;
855
856 outfile:
857         /*
858          * At this point nothing is lost: c->wbuf is empty. However, refile
859          * nextblock to avoid writing to the same address again.
860          */
861
862         spin_lock(&c->erase_completion_lock);
863
864         jeb = &c->blocks[outvec_to / c->sector_size];
865         jffs2_block_refile(c, jeb, REFILE_ANYWAY);
866
867         spin_unlock(&c->erase_completion_lock);
868
869 outerr:
870         *retlen = 0;
871         up_write(&c->wbuf_sem);
872         return ret;
873 }
874
875 /*
876  *      This is the entry point for flash writes.
877  *      If the flash is write-buffered, build a kvec and write it via writev.
878  */
879 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
880                       size_t *retlen, const u_char *buf)
881 {
882         struct kvec vecs[1];
883
884         if (!jffs2_is_writebuffered(c))
885                 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
886
887         vecs[0].iov_base = (unsigned char *) buf;
888         vecs[0].iov_len = len;
889         return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
890 }
891
892 /*
893         Handle readback from writebuffer and ECC failure return
894 */
895 int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
896 {
897         loff_t  orbf = 0, owbf = 0, lwbf = 0;
898         int     ret;
899
900         if (!jffs2_is_writebuffered(c))
901                 return c->mtd->read(c->mtd, ofs, len, retlen, buf);
902
903         /* Read flash */
904         down_read(&c->wbuf_sem);
905         ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
906
907         if ( (ret == -EBADMSG) && (*retlen == len) ) {
908                 printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
909                        len, ofs);
910                 /*
911                  * We have the raw data without ECC correction in the buffer; maybe
912                  * we are lucky and all or part of the data is correct. We check the node.
913                  * If the data is corrupted, the node check will sort it out.
914                  * We keep this block; it will fail on write or erase and then we
915                  * mark it bad. Or should we do that now? But we should give it a chance.
916                  * Maybe we had a system crash or power loss before the ECC write or
917                  * an erase was completed.
918                  * So we return success. :)
919                  */
920                 ret = 0;
921         }
922
923         /* if no writebuffer available or write buffer empty, return */
924         if (!c->wbuf_pagesize || !c->wbuf_len)
925                 goto exit;
926
927         /* if we read in a different block, return */
928         if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
929                 goto exit;
930
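        /* The read lies in the same eraseblock as the write buffer. Work out
           whether it overlaps the buffered data and, if so, copy the buffered
           bytes over what was read from flash. */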
931         if (ofs >= c->wbuf_ofs) {
932                 owbf = (ofs - c->wbuf_ofs);     /* offset in write buffer */
933                 if (owbf > c->wbuf_len)         /* is read beyond write buffer ? */
934                         goto exit;
935                 lwbf = c->wbuf_len - owbf;      /* number of bytes to copy */
936                 if (lwbf > len)
937                         lwbf = len;
938         } else {
939                 orbf = (c->wbuf_ofs - ofs);     /* offset in read buffer */
940                 if (orbf > len)                 /* does the read end before the write buffer ? */
941                         goto exit;
942                 lwbf = len - orbf;              /* number of bytes to copy */
943                 if (lwbf > c->wbuf_len)
944                         lwbf = c->wbuf_len;
945         }
946         if (lwbf > 0)
947                 memcpy(buf+orbf,c->wbuf+owbf,lwbf);
948
949 exit:
950         up_read(&c->wbuf_sem);
951         return ret;
952 }
953
954 /*
955  *      Check whether the out-of-band area is empty
956  */
957 int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
958 {
959         unsigned char *buf;
960         int     ret = 0;
961         int     i,len,page;
962         size_t  retlen;
963         int     oob_size;
964
965         /* allocate a buffer for all oob data in this sector */
966         oob_size = c->mtd->oobsize;
967         len = 4 * oob_size;
968         buf = kmalloc(len, GFP_KERNEL);
969         if (!buf) {
970                 printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
971                 return -ENOMEM;
972         }
973         /*
974          * If mode = 0, we scan for a totally empty oob area; otherwise we have
975          * to take care of the cleanmarker in the first page of the block.
976          */
977         ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf);
978         if (ret) {
979                 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
980                 goto out;
981         }
982
983         if (retlen < len) {
984                 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB returned short read "
985                           "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
986                 ret = -EIO;
987                 goto out;
988         }
989
990         /* Special check for first page */
991         for(i = 0; i < oob_size ; i++) {
992                 /* Yeah, we know about the cleanmarker. */
993                 if (mode && i >= c->fsdata_pos &&
994                     i < c->fsdata_pos + c->fsdata_len)
995                         continue;
996
997                 if (buf[i] != 0xFF) {
998                         D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
999                                   buf[i], i, jeb->offset));
1000                         ret = 1;
1001                         goto out;
1002                 }
1003         }
1004
1005         /* we know, we are aligned :) */
1006         for (page = oob_size; page < len; page += sizeof(long)) {
1007                 unsigned long dat = *(unsigned long *)(&buf[page]);
1008                 if(dat != -1) {
1009                         ret = 1;
1010                         goto out;
1011                 }
1012         }
1013
1014 out:
1015         kfree(buf);
1016
1017         return ret;
1018 }
1019
1020 /*
1021  *      Scan for a valid cleanmarker and for bad blocks.
1022  *      For virtual blocks (concatenated physical blocks), check the cleanmarker
1023  *      only in the first page of the first physical block, but scan for bad
1024  *      blocks in all physical blocks.
1025  */
1026 int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
1027 {
1028         struct jffs2_unknown_node n;
1029         unsigned char buf[2 * NAND_MAX_OOBSIZE];
1030         unsigned char *p;
1031         int ret, i, cnt, retval = 0;
1032         size_t retlen, offset;
1033         int oob_size;
1034
1035         offset = jeb->offset;
1036         oob_size = c->mtd->oobsize;
1037
1038         /* Loop through the physical blocks */
1039         for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
1040                 /* Check first if the block is bad. */
1041                 if (c->mtd->block_isbad (c->mtd, offset)) {
1042                         D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
1043                         return 2;
1044                 }
1045                 /*
1046                  *    We read oob data from pages 0 and 1 of the block.
1047                  *    Page 0 contains the cleanmarker and bad-block info;
1048                  *    page 1 contains the failure count of this block.
1049                  */
1050                 ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);
1051
1052                 if (ret) {
1053                         D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
1054                         return ret;
1055                 }
1056                 if (retlen < (oob_size << 1)) {
1057                         D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB returned short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
1058                         return -EIO;
1059                 }
1060
1061                 /* Check cleanmarker only on the first physical block */
1062                 if (!cnt) {
1063                         n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
1064                         n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
1065                         n.totlen = cpu_to_je32 (8);
1066                         p = (unsigned char *) &n;
1067
1068                         for (i = 0; i < c->fsdata_len; i++) {
1069                                 if (buf[c->fsdata_pos + i] != p[i]) {
1070                                         retval = 1;
1071                                 }
1072                         }
1073                         D1(if (retval == 1) {
1074                                 printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
1075                                 printk(KERN_WARNING "OOB at %08zx was ", offset);
1076                                 for (i=0; i < oob_size; i++) {
1077                                         printk("%02x ", buf[i]);
1078                                 }
1079                                 printk("\n");
1080                         })
1081                 }
1082                 offset += c->mtd->erasesize;
1083         }
1084         return retval;
1085 }
1086
1087 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
1088 {
1089         struct  jffs2_unknown_node n;
1090         int     ret;
1091         size_t  retlen;
1092
1093         n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
1094         n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
1095         n.totlen = cpu_to_je32(8);
1096
1097         ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);
1098
1099         if (ret) {
1100                 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1101                 return ret;
1102         }
1103         if (retlen != c->fsdata_len) {
1104                 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
1105                 return ret;
1106         }
1107         return 0;
1108 }
1109
1110 /*
1111  * On NAND we try to mark this block bad. If erasing it has failed
1112  * MAX_ERASE_FAILURES times, we finally mark it bad.
1113  * Don't care about failures. This block remains on the erase-pending
1114  * or badblock list as long as nobody manipulates the flash with
1115  * a bootloader or something like that.
1116  */
1117
1118 int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1119 {
1120         int     ret;
1121
1122         /* if the count is < max, we try to write the counter to the 2nd page oob area */
1123         if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1124                 return 0;
1125
1126         if (!c->mtd->block_markbad)
1127                 return 1; /* What else can we do? */
1128
1129         D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
1130         ret = c->mtd->block_markbad(c->mtd, bad_offset);
1131
1132         if (ret) {
1133                 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1134                 return ret;
1135         }
1136         return 1;
1137 }
1138
1139 #define NAND_JFFS2_OOB16_FSDALEN        8
1140
1141 static struct nand_oobinfo jffs2_oobinfo_docecc = {
1142         .useecc = MTD_NANDECC_PLACE,
1143         .eccbytes = 6,
1144         .eccpos = {0,1,2,3,4,5}
1145 };
1146
1147
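/* Decide where in the OOB area the JFFS2 cleanmarker data (fsdata) lives:
   use the free bytes reported by ECC autoplacement where available,
   otherwise fall back to the legacy fixed layout for DiskOnChip. */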
1148 static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
1149 {
1150         struct nand_oobinfo *oinfo = &c->mtd->oobinfo;
1151
1152         /* Do this only if we have an oob buffer */
1153         if (!c->mtd->oobsize)
1154                 return 0;
1155
1156         /* Cleanmarker is out-of-band, so inline size zero */
1157         c->cleanmarker_size = 0;
1158
1159         /* Should we use autoplacement ? */
1160         if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
1161                 D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
1162                 /* Get the position of the free bytes */
1163                 if (!oinfo->oobfree[0][1]) {
1164                         printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
1165                         return -ENOSPC;
1166                 }
1167                 c->fsdata_pos = oinfo->oobfree[0][0];
1168                 c->fsdata_len = oinfo->oobfree[0][1];
1169                 if (c->fsdata_len > 8)
1170                         c->fsdata_len = 8;
1171         } else {
1172                 /* This is just a legacy fallback and should go away soon */
1173                 switch(c->mtd->ecctype) {
1174                 case MTD_ECC_RS_DiskOnChip:
1175                         printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
1176                         c->oobinfo = &jffs2_oobinfo_docecc;
1177                         c->fsdata_pos = 6;
1178                         c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
1179                         c->badblock_pos = 15;
1180                         break;
1181
1182                 default:
1183                         D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
1184                         return -EINVAL;
1185                 }
1186         }
1187         return 0;
1188 }
1189
1190 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1191 {
1192         int res;
1193
1194         /* Initialise write buffer */
1195         init_rwsem(&c->wbuf_sem);
1196         c->wbuf_pagesize = c->mtd->writesize;
1197         c->wbuf_ofs = 0xFFFFFFFF;
1198
1199         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1200         if (!c->wbuf)
1201                 return -ENOMEM;
1202
1203         res = jffs2_nand_set_oobinfo(c);
1204
1205 #ifdef BREAKME
1206         if (!brokenbuf)
1207                 brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1208         if (!brokenbuf) {
1209                 kfree(c->wbuf);
1210                 return -ENOMEM;
1211         }
1212         memset(brokenbuf, 0xdb, c->wbuf_pagesize);
1213 #endif
1214         return res;
1215 }
1216
1217 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1218 {
1219         kfree(c->wbuf);
1220 }
1221
1222 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1223         c->cleanmarker_size = 0;                /* No cleanmarkers needed */
1224
1225         /* Initialize write buffer */
1226         init_rwsem(&c->wbuf_sem);
1227
1228
1229         c->wbuf_pagesize =  c->mtd->erasesize;
1230
1231         /* Find a suitable c->sector_size
1232          * - Not too many sectors
1233          * - Sectors have to be at least 4K + some bytes
1234          * - All known dataflashes have erase sizes of 528 or 1056 bytes
1235          * - We take at least 8 eraseblocks and want to have at least 8KiB size
1236          * - The concatenation should be a power of 2
1237          */
1238
1239         c->sector_size = 8 * c->mtd->erasesize;
1240
1241         while (c->sector_size < 8192) {
1242                 c->sector_size *= 2;
1243         }
1244
1245         /* It may be necessary to adjust the flash size */
1246         c->flash_size = c->mtd->size;
1247
1248         if ((c->flash_size % c->sector_size) != 0) {
1249                 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1250                 printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size / 1024);
1251         }
1252
1253         c->wbuf_ofs = 0xFFFFFFFF;
1254         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1255         if (!c->wbuf)
1256                 return -ENOMEM;
1257
1258         printk(KERN_INFO "JFFS2 write-buffering enabled: buffer size %d, erase size %d\n", c->wbuf_pagesize, c->sector_size);
1259
1260         return 0;
1261 }
1262
1263 void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1264         kfree(c->wbuf);
1265 }
1266
1267 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1268         /* Cleanmarker currently occupies whole programming regions,
1269          * either one, or two for 8-byte STMicro flashes. */
1270         c->cleanmarker_size = max(16u, c->mtd->writesize);
1271
1272         /* Initialize write buffer */
1273         init_rwsem(&c->wbuf_sem);
1274         c->wbuf_pagesize = c->mtd->writesize;
1275         c->wbuf_ofs = 0xFFFFFFFF;
1276
1277         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1278         if (!c->wbuf)
1279                 return -ENOMEM;
1280
1281         return 0;
1282 }
1283
1284 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1285         kfree(c->wbuf);
1286 }