[MTD] NAND Consolidate oobinfo handling
fs/jffs2/wbuf.c
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright (C) 2001-2003 Red Hat, Inc.
5  * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
6  *
7  * Created by David Woodhouse <dwmw2@infradead.org>
8  * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9  *
10  * For licensing information, see the file 'LICENCE' in this directory.
11  *
12  * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
13  *
14  */
15
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/crc32.h>
20 #include <linux/mtd/nand.h>
21 #include <linux/jiffies.h>
22
23 #include "nodelist.h"
24
25 /* For testing write failures */
26 #undef BREAKME
27 #undef BREAKMEHEADER
28
29 #ifdef BREAKME
30 static unsigned char *brokenbuf;
31 #endif
32
33 #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
34 #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
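/* For example, with a typical 512-byte wbuf_pagesize, PAGE_DIV(0x2345) gives
   0x2200 (the start of the flash page containing that address) and
   PAGE_MOD(0x2345) gives 0x145 (the offset within that page). */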
35
36 /* max. erase failures before we mark a block bad */
37 #define MAX_ERASE_FAILURES      2
38
39 struct jffs2_inodirty {
40         uint32_t ino;
41         struct jffs2_inodirty *next;
42 };
43
44 static struct jffs2_inodirty inodirty_nomem;
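/* inodirty_nomem is a sentinel: if allocating a real jffs2_inodirty entry
   fails, c->wbuf_inodes is pointed at this so that every inode is treated
   as having data pending in the write buffer. */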
45
46 static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
47 {
48         struct jffs2_inodirty *this = c->wbuf_inodes;
49
50         /* If a malloc failed, consider _everything_ dirty */
51         if (this == &inodirty_nomem)
52                 return 1;
53
54         /* If ino == 0, _any_ non-GC writes mean 'yes' */
55         if (this && !ino)
56                 return 1;
57
58         /* Look to see if the inode in question is pending in the wbuf */
59         while (this) {
60                 if (this->ino == ino)
61                         return 1;
62                 this = this->next;
63         }
64         return 0;
65 }
66
67 static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
68 {
69         struct jffs2_inodirty *this;
70
71         this = c->wbuf_inodes;
72
73         if (this != &inodirty_nomem) {
74                 while (this) {
75                         struct jffs2_inodirty *next = this->next;
76                         kfree(this);
77                         this = next;
78                 }
79         }
80         c->wbuf_inodes = NULL;
81 }
82
83 static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
84 {
85         struct jffs2_inodirty *new;
86
87         /* Mark the superblock dirty so that kupdated will flush... */
88         jffs2_erase_pending_trigger(c);
89
90         if (jffs2_wbuf_pending_for_ino(c, ino))
91                 return;
92
93         new = kmalloc(sizeof(*new), GFP_KERNEL);
94         if (!new) {
95                 D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
96                 jffs2_clear_wbuf_ino_list(c);
97                 c->wbuf_inodes = &inodirty_nomem;
98                 return;
99         }
100         new->ino = ino;
101         new->next = c->wbuf_inodes;
102         c->wbuf_inodes = new;
103         return;
104 }
105
106 static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
107 {
108         struct list_head *this, *next;
109         static int n;
110
111         if (list_empty(&c->erasable_pending_wbuf_list))
112                 return;
113
114         list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
115                 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
116
117                 D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
118                 list_del(this);
119                 if ((jiffies + (n++)) & 127) {
120                         /* Most of the time, we just erase it immediately. Otherwise we
121                            spend ages scanning it on mount, etc. */
122                         D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
123                         list_add_tail(&jeb->list, &c->erase_pending_list);
124                         c->nr_erasing_blocks++;
125                         jffs2_erase_pending_trigger(c);
126                 } else {
127                         /* Sometimes, however, we leave it elsewhere so it doesn't get
128                            immediately reused, and we spread the load a bit. */
129                         D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
130                         list_add_tail(&jeb->list, &c->erasable_list);
131                 }
132         }
133 }
134
135 #define REFILE_NOTEMPTY 0
136 #define REFILE_ANYWAY   1
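/* REFILE_NOTEMPTY: the block being refiled must contain at least one node
   (used from wbuf recovery, where something had been written before the
   failure). REFILE_ANYWAY: the block may legitimately be empty (used when a
   direct write in jffs2_flash_writev() fails). */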
137
138 static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
139 {
140         D1(printk("About to refile bad block at %08x\n", jeb->offset));
141
142         /* File the existing block on the bad_used_list.... */
143         if (c->nextblock == jeb)
144                 c->nextblock = NULL;
145         else /* Not sure this should ever happen... need more coffee */
146                 list_del(&jeb->list);
147         if (jeb->first_node) {
148                 D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
149                 list_add(&jeb->list, &c->bad_used_list);
150         } else {
151                 BUG_ON(allow_empty == REFILE_NOTEMPTY);
152                 /* It has to have had some nodes or we couldn't be here */
153                 D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
154                 list_add(&jeb->list, &c->erase_pending_list);
155                 c->nr_erasing_blocks++;
156                 jffs2_erase_pending_trigger(c);
157         }
158
159         if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
160                 uint32_t oldfree = jeb->free_size;
161
162                 jffs2_link_node_ref(c, jeb, 
163                                     (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
164                                     oldfree, NULL);
165                 /* convert to wasted */
166                 c->wasted_size += oldfree;
167                 jeb->wasted_size += oldfree;
168                 c->dirty_size -= oldfree;
169                 jeb->dirty_size -= oldfree;
170         }
171
172         jffs2_dbg_dump_block_lists_nolock(c);
173         jffs2_dbg_acct_sanity_check_nolock(c,jeb);
174         jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
175 }
176
177 static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
178                                                             struct jffs2_inode_info *f,
179                                                             struct jffs2_raw_node_ref *raw,
180                                                             union jffs2_node_union *node)
181 {
182         struct jffs2_node_frag *frag;
183         struct jffs2_full_dirent *fd;
184
185         dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
186                     node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
187
188         BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
189                je16_to_cpu(node->u.magic) != 0);
190
191         switch (je16_to_cpu(node->u.nodetype)) {
192         case JFFS2_NODETYPE_INODE:
193                 if (f->metadata && f->metadata->raw == raw) {
194                         dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
195                         return &f->metadata->raw;
196                 }
197                 frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
198                 BUG_ON(!frag);
199                 /* Find a frag which refers to the full_dnode we want to modify */
200                 while (!frag->node || frag->node->raw != raw) {
201                         frag = frag_next(frag);
202                         BUG_ON(!frag);
203                 }
204                 dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
205                 return &frag->node->raw;
206
207         case JFFS2_NODETYPE_DIRENT:
208                 for (fd = f->dents; fd; fd = fd->next) {
209                         if (fd->raw == raw) {
210                                 dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
211                                 return &fd->raw;
212                         }
213                 }
214                 BUG();
215
216         default:
217                 dbg_noderef("Don't care about replacing raw for nodetype %x\n",
218                             je16_to_cpu(node->u.nodetype));
219                 break;
220         }
221         return NULL;
222 }
223
224 /* Recover from failure to write wbuf. Recover the nodes up to the
225  * wbuf, not the one which we were starting to try to write. */
226
227 static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
228 {
229         struct jffs2_eraseblock *jeb, *new_jeb;
230         struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
231         size_t retlen;
232         int ret;
233         int nr_refile = 0;
234         unsigned char *buf;
235         uint32_t start, end, ofs, len;
236
237         jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
238
239         spin_lock(&c->erase_completion_lock);
240         jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
241         spin_unlock(&c->erase_completion_lock);
242
243         BUG_ON(!ref_obsolete(jeb->last_node));
244
245         /* Find the first node to be recovered, by skipping over every
246            node which ends before the wbuf starts, or which is obsolete. */
247         for (next = raw = jeb->first_node; next; raw = next) {
248                 next = ref_next(raw);
249
250                 if (ref_obsolete(raw) || 
251                     (next && ref_offset(next) <= c->wbuf_ofs)) {
252                         dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
253                                     ref_offset(raw), ref_flags(raw),
254                                     (ref_offset(raw) + ref_totlen(c, jeb, raw)),
255                                     c->wbuf_ofs);
256                         continue;
257                 }
258                 dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
259                             ref_offset(raw), ref_flags(raw),
260                             (ref_offset(raw) + ref_totlen(c, jeb, raw)));
261
262                 first_raw = raw;
263                 break;
264         }
265
266         if (!first_raw) {
267                 /* All nodes were obsolete. Nothing to recover. */
268                 D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
269                 c->wbuf_len = 0;
270                 return;
271         }
272
273         start = ref_offset(first_raw);
274         end = ref_offset(jeb->last_node);
275         nr_refile = 1;
276
277         /* Count the number of refs which need to be copied */
278         while ((raw = ref_next(raw)) != jeb->last_node)
279                 nr_refile++;
280
281         dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
282                     start, end, end - start, nr_refile);
283
284         buf = NULL;
285         if (start < c->wbuf_ofs) {
286                 /* First affected node was already partially written.
287                  * Attempt to reread the old data into our buffer. */
288
289                 buf = kmalloc(end - start, GFP_KERNEL);
290                 if (!buf) {
291                         printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
292
293                         goto read_failed;
294                 }
295
296                 /* Do the read... */
297                 ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
298
299                 if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
300                         /* ECC recovered */
301                         ret = 0;
302                 }
303                 if (ret || retlen != c->wbuf_ofs - start) {
304                         printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
305
306                         kfree(buf);
307                         buf = NULL;
308                 read_failed:
309                         first_raw = ref_next(first_raw);
310                         nr_refile--;
311                         while (first_raw && ref_obsolete(first_raw)) {
312                                 first_raw = ref_next(first_raw);
313                                 nr_refile--;
314                         }
315
316                         /* If this was the only node to be recovered, give up */
317                         if (!first_raw) {
318                                 c->wbuf_len = 0;
319                                 return;
320                         }
321
322                         /* It wasn't. Go on and try to recover nodes complete in the wbuf */
323                         start = ref_offset(first_raw);
324                         dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
325                                     start, end, end - start, nr_refile);
326
327                 } else {
328                         /* Read succeeded. Copy the remaining data from the wbuf */
329                         memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
330                 }
331         }
332         /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
333            Either 'buf' contains the data, or we find it in the wbuf */
334
335         /* ... and get an allocation of space from a shiny new block instead */
336         ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
337         if (ret) {
338                 printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
339                 kfree(buf);
340                 return;
341         }
342
343         ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
344         if (ret) {
345                 printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
346                 kfree(buf);
347                 return;
348         }
349
350         ofs = write_ofs(c);
351
352         if (end-start >= c->wbuf_pagesize) {
353                 /* Need to do another write immediately, but it's possible
354                    that this is just because the wbuf itself is completely
355                    full, and there's nothing earlier read back from the
356                    flash. Hence 'buf' isn't necessarily what we're writing
357                    from. */
358                 unsigned char *rewrite_buf = buf?:c->wbuf;
359                 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
360
361                 D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
362                           towrite, ofs));
363
364 #ifdef BREAKMEHEADER
365                 static int breakme;
366                 if (breakme++ == 20) {
367                         printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
368                         breakme = 0;
369                         c->mtd->write(c->mtd, ofs, towrite, &retlen,
370                                       brokenbuf);
371                         ret = -EIO;
372                 } else
373 #endif
374                         ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
375                                             rewrite_buf);
376
377                 if (ret || retlen != towrite) {
378                         /* Argh. We tried. Really we did. */
379                         printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
380                         kfree(buf);
381
382                         if (retlen)
383                                 jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
384
385                         return;
386                 }
387                 printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
388
389                 c->wbuf_len = (end - start) - towrite;
390                 c->wbuf_ofs = ofs + towrite;
391                 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
392                 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
393         } else {
394                 /* OK, now we're left with the dregs in whichever buffer we're using */
395                 if (buf) {
396                         memcpy(c->wbuf, buf, end-start);
397                 } else {
398                         memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
399                 }
400                 c->wbuf_ofs = ofs;
401                 c->wbuf_len = end - start;
402         }
403
404         /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
405         new_jeb = &c->blocks[ofs / c->sector_size];
406
407         spin_lock(&c->erase_completion_lock);
408         for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
409                 uint32_t rawlen = ref_totlen(c, jeb, raw);
410                 struct jffs2_inode_cache *ic;
411                 struct jffs2_raw_node_ref *new_ref;
412                 struct jffs2_raw_node_ref **adjust_ref = NULL;
413                 struct jffs2_inode_info *f = NULL;
414
415                 D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
416                           rawlen, ref_offset(raw), ref_flags(raw), ofs));
417
418                 ic = jffs2_raw_ref_to_ic(raw);
419
420                 /* Ick. This XATTR mess should be fixed shortly... */
421                 if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
422                         struct jffs2_xattr_datum *xd = (void *)ic;
423                         BUG_ON(xd->node != raw);
424                         adjust_ref = &xd->node;
425                         raw->next_in_ino = NULL;
426                         ic = NULL;
427                 } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
428                         struct jffs2_xattr_datum *xr = (void *)ic;
429                         BUG_ON(xr->node != raw);
430                         adjust_ref = &xr->node;
431                         raw->next_in_ino = NULL;
432                         ic = NULL;
433                 } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
434                         struct jffs2_raw_node_ref **p = &ic->nodes;
435
436                         /* Remove the old node from the per-inode list */
437                         while (*p && *p != (void *)ic) {
438                                 if (*p == raw) {
439                                         (*p) = (raw->next_in_ino);
440                                         raw->next_in_ino = NULL;
441                                         break;
442                                 }
443                                 p = &((*p)->next_in_ino);
444                         }
445
446                         if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
447                                 /* If it's an in-core inode, then we have to adjust any
448                                    full_dirent or full_dnode structure to point to the
449                                    new version instead of the old */
450                                 f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
451                                 if (IS_ERR(f)) {
452                                         /* Should never happen; it _must_ be present */
453                                         JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
454                                                     ic->ino, PTR_ERR(f));
455                                         BUG();
456                                 }
457                                 /* We don't lock f->sem. There's a number of ways we could
458                                    end up in here with it already being locked, and nobody's
459                                    going to modify it on us anyway because we hold the
460                                    alloc_sem. We're only changing one ->raw pointer too,
461                                    which we can get away with without upsetting readers. */
462                                 adjust_ref = jffs2_incore_replace_raw(c, f, raw,
463                                                                       (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
464                         } else if (unlikely(ic->state != INO_STATE_PRESENT &&
465                                             ic->state != INO_STATE_CHECKEDABSENT &&
466                                             ic->state != INO_STATE_GC)) {
467                                 JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
468                                 BUG();
469                         }
470                 }
471
472                 new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
473
474                 if (adjust_ref) {
475                         BUG_ON(*adjust_ref != raw);
476                         *adjust_ref = new_ref;
477                 }
478                 if (f)
479                         jffs2_gc_release_inode(c, f);
480
481                 if (!ref_obsolete(raw)) {
482                         jeb->dirty_size += rawlen;
483                         jeb->used_size  -= rawlen;
484                         c->dirty_size += rawlen;
485                         c->used_size -= rawlen;
486                         raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
487                         BUG_ON(raw->next_in_ino);
488                 }
489                 ofs += rawlen;
490         }
491
492         kfree(buf);
493
494         /* Fix up the original jeb now it's on the bad_list */
495         if (first_raw == jeb->first_node) {
496                 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
497                 list_del(&jeb->list);
498                 list_add(&jeb->list, &c->erase_pending_list);
499                 c->nr_erasing_blocks++;
500                 jffs2_erase_pending_trigger(c);
501         }
502
503         jffs2_dbg_acct_sanity_check_nolock(c, jeb);
504         jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
505
506         jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
507         jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
508
509         spin_unlock(&c->erase_completion_lock);
510
511         D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));
512
513 }
514
515 /* Meaning of pad argument:
516    0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
517    1: Pad, do not adjust nextblock free_size
518    2: Pad, adjust nextblock free_size
519 */
520 #define NOPAD           0
521 #define PAD_NOACCOUNT   1
522 #define PAD_ACCOUNTING  2
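/* NOPAD is used from jffs2_flash_writev() when the buffer is flushed because
   it is exactly full; PAD_NOACCOUNT from jffs2_flush_wbuf_pad() and when a
   write moves on to a new eraseblock; PAD_ACCOUNTING from
   jffs2_flush_wbuf_gc() when the buffer is padded out instead of letting GC
   fill it. */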
523
524 static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
525 {
526         struct jffs2_eraseblock *wbuf_jeb;
527         int ret;
528         size_t retlen;
529
530         /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
531            del_timer() the timer we never initialised. */
532         if (!jffs2_is_writebuffered(c))
533                 return 0;
534
535         if (!down_trylock(&c->alloc_sem)) {
536                 up(&c->alloc_sem);
537                 printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
538                 BUG();
539         }
540
541         if (!c->wbuf_len)       /* already checked c->wbuf above */
542                 return 0;
543
544         wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
545         if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
546                 return -ENOMEM;
547
548         /* Claim the remaining space on the page. This happens
549            if we have a change to a new block, or if fsync forces
550            us to flush the write buffer. If we have a switch to the
551            next page, we will not have enough remaining space for
552            this.
553         */
554         if (pad ) {
555                 c->wbuf_len = PAD(c->wbuf_len);
556
557                 /* Pad with JFFS2_DIRTY_BITMASK initially.  This helps out ECC'd NOR
558                    with an 8-byte page size */
559                 memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
560
561                 if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
562                         struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
563                         padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
564                         padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
565                         padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
566                         padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
567                 }
568         }
569         /* else jffs2_flash_writev has actually filled in the rest of the
570            buffer for us, and will deal with the node refs etc. later. */
571
572 #ifdef BREAKME
573         static int breakme;
574         if (breakme++ == 20) {
575                 printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
576                 breakme = 0;
577                 c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
578                               brokenbuf);
579                 ret = -EIO;
580         } else
581 #endif
582
583                 ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
584
585         if (ret || retlen != c->wbuf_pagesize) {
586                 if (ret)
587                         printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
588                 else {
589                         printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
590                                 retlen, c->wbuf_pagesize);
591                         ret = -EIO;
592                 }
593
594                 jffs2_wbuf_recover(c);
595
596                 return ret;
597         }
598
599         /* Adjust free size of the block if we padded. */
600         if (pad) {
601                 uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
602
603                 D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
604                           (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));
605
606                 /* wbuf_pagesize - wbuf_len is the amount of space that's to be
607                    padded. If there is less free space in the block than that,
608                    something screwed up */
609                 if (wbuf_jeb->free_size < waste) {
610                         printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
611                                c->wbuf_ofs, c->wbuf_len, waste);
612                         printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
613                                wbuf_jeb->offset, wbuf_jeb->free_size);
614                         BUG();
615                 }
616
617                 spin_lock(&c->erase_completion_lock);
618
619                 jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
620                 /* FIXME: that made it count as dirty. Convert to wasted */
621                 wbuf_jeb->dirty_size -= waste;
622                 c->dirty_size -= waste;
623                 wbuf_jeb->wasted_size += waste;
624                 c->wasted_size += waste;
625         } else
626                 spin_lock(&c->erase_completion_lock);
627
628         /* Stick any now-obsoleted blocks on the erase_pending_list */
629         jffs2_refile_wbuf_blocks(c);
630         jffs2_clear_wbuf_ino_list(c);
631         spin_unlock(&c->erase_completion_lock);
632
633         memset(c->wbuf,0xff,c->wbuf_pagesize);
634         /* adjust write buffer offset, else we get a non-contiguous write bug */
635         c->wbuf_ofs += c->wbuf_pagesize;
636         c->wbuf_len = 0;
637         return 0;
638 }
639
640 /* Trigger garbage collection to flush the write-buffer.
641    If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
642    outstanding. If ino arg non-zero, do it only if a write for the
643    given inode is outstanding. */
644 int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
645 {
646         uint32_t old_wbuf_ofs;
647         uint32_t old_wbuf_len;
648         int ret = 0;
649
650         D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
651
652         if (!c->wbuf)
653                 return 0;
654
655         down(&c->alloc_sem);
656         if (!jffs2_wbuf_pending_for_ino(c, ino)) {
657                 D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
658                 up(&c->alloc_sem);
659                 return 0;
660         }
661
662         old_wbuf_ofs = c->wbuf_ofs;
663         old_wbuf_len = c->wbuf_len;
664
665         if (c->unchecked_size) {
666                 /* GC won't make any progress for a while */
667                 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
668                 down_write(&c->wbuf_sem);
669                 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
670                 /* retry flushing wbuf in case jffs2_wbuf_recover
671                    left some data in the wbuf */
672                 if (ret)
673                         ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
674                 up_write(&c->wbuf_sem);
675         } else while (old_wbuf_len &&
676                       old_wbuf_ofs == c->wbuf_ofs) {
677
678                 up(&c->alloc_sem);
679
680                 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
681
682                 ret = jffs2_garbage_collect_pass(c);
683                 if (ret) {
684                         /* GC failed. Flush it with padding instead */
685                         down(&c->alloc_sem);
686                         down_write(&c->wbuf_sem);
687                         ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
688                         /* retry flushing wbuf in case jffs2_wbuf_recover
689                            left some data in the wbuf */
690                         if (ret)
691                                 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
692                         up_write(&c->wbuf_sem);
693                         break;
694                 }
695                 down(&c->alloc_sem);
696         }
697
698         D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
699
700         up(&c->alloc_sem);
701         return ret;
702 }
703
704 /* Pad write-buffer to end and write it, wasting space. */
705 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
706 {
707         int ret;
708
709         if (!c->wbuf)
710                 return 0;
711
712         down_write(&c->wbuf_sem);
713         ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
714         /* retry - maybe wbuf recover left some data in wbuf. */
715         if (ret)
716                 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
717         up_write(&c->wbuf_sem);
718
719         return ret;
720 }
721
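/* Copy as much of 'buf' as fits into the write buffer and return the number
   of bytes consumed. Returns 0 when the buffer is currently empty and the
   request covers at least a whole page, so the caller can write those pages
   to flash directly. */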
722 static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
723                               size_t len)
724 {
725         if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
726                 return 0;
727
728         if (len > (c->wbuf_pagesize - c->wbuf_len))
729                 len = c->wbuf_pagesize - c->wbuf_len;
730         memcpy(c->wbuf + c->wbuf_len, buf, len);
731         c->wbuf_len += (uint32_t) len;
732         return len;
733 }
734
735 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
736                        unsigned long count, loff_t to, size_t *retlen,
737                        uint32_t ino)
738 {
739         struct jffs2_eraseblock *jeb;
740         size_t wbuf_retlen, donelen = 0;
741         uint32_t outvec_to = to;
742         int ret, invec;
743
744         /* If not writebuffered flash, don't bother */
745         if (!jffs2_is_writebuffered(c))
746                 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
747
748         down_write(&c->wbuf_sem);
749
750         /* If wbuf_ofs is not initialized, set it to target address */
751         if (c->wbuf_ofs == 0xFFFFFFFF) {
752                 c->wbuf_ofs = PAGE_DIV(to);
753                 c->wbuf_len = PAGE_MOD(to);
754                 memset(c->wbuf,0xff,c->wbuf_pagesize);
755         }
756
757         /*
758          * Sanity checks on target address.  It's permitted to write
759          * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
760          * write at the beginning of a new erase block. Anything else,
761          * and you die.  New block starts at xxx000c (0-b = block
762          * header)
763          */
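        /* For example, if the buffer currently covers 0x2200-0x2245, the next
           write must land at PAD(0x2245) == 0x2248 (assuming the usual 4-byte
           JFFS2 PAD() rounding) or at the start of a fresh eraseblock. */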
764         if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
765                 /* It's a write to a new block */
766                 if (c->wbuf_len) {
767                         D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
768                                   "causes flush of wbuf at 0x%08x\n",
769                                   (unsigned long)to, c->wbuf_ofs));
770                         ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
771                         if (ret)
772                                 goto outerr;
773                 }
774                 /* set pointer to new block */
775                 c->wbuf_ofs = PAGE_DIV(to);
776                 c->wbuf_len = PAGE_MOD(to);
777         }
778
779         if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
780                 /* We're not writing immediately after the writebuffer. Bad. */
781                 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
782                        "to %08lx\n", (unsigned long)to);
783                 if (c->wbuf_len)
784                         printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
785                                c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
786                 BUG();
787         }
788
789         /* adjust alignment offset */
790         if (c->wbuf_len != PAGE_MOD(to)) {
791                 c->wbuf_len = PAGE_MOD(to);
792                 /* take care of alignment to next page */
793                 if (!c->wbuf_len) {
794                         c->wbuf_len = c->wbuf_pagesize;
795                         ret = __jffs2_flush_wbuf(c, NOPAD);
796                         if (ret)
797                                 goto outerr;
798                 }
799         }
800
801         for (invec = 0; invec < count; invec++) {
802                 int vlen = invecs[invec].iov_len;
803                 uint8_t *v = invecs[invec].iov_base;
804
805                 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
806
807                 if (c->wbuf_len == c->wbuf_pagesize) {
808                         ret = __jffs2_flush_wbuf(c, NOPAD);
809                         if (ret)
810                                 goto outerr;
811                 }
812                 vlen -= wbuf_retlen;
813                 outvec_to += wbuf_retlen;
814                 donelen += wbuf_retlen;
815                 v += wbuf_retlen;
816
817                 if (vlen >= c->wbuf_pagesize) {
818                         ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
819                                             &wbuf_retlen, v);
820                         if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
821                                 goto outfile;
822
823                         vlen -= wbuf_retlen;
824                         outvec_to += wbuf_retlen;
825                         c->wbuf_ofs = outvec_to;
826                         donelen += wbuf_retlen;
827                         v += wbuf_retlen;
828                 }
829
830                 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
831                 if (c->wbuf_len == c->wbuf_pagesize) {
832                         ret = __jffs2_flush_wbuf(c, NOPAD);
833                         if (ret)
834                                 goto outerr;
835                 }
836
837                 outvec_to += wbuf_retlen;
838                 donelen += wbuf_retlen;
839         }
840
841         /*
842          * If there's a remainder in the wbuf and it's a non-GC write,
843          * remember that the wbuf affects this ino
844          */
845         *retlen = donelen;
846
847         if (jffs2_sum_active()) {
848                 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
849                 if (res)
850                         return res;
851         }
852
853         if (c->wbuf_len && ino)
854                 jffs2_wbuf_dirties_inode(c, ino);
855
856         ret = 0;
857         up_write(&c->wbuf_sem);
858         return ret;
859
860 outfile:
861         /*
862          * At this point we have no problem; c->wbuf is empty. However,
863          * refile nextblock to avoid writing again to the same address.
864          */
865
866         spin_lock(&c->erase_completion_lock);
867
868         jeb = &c->blocks[outvec_to / c->sector_size];
869         jffs2_block_refile(c, jeb, REFILE_ANYWAY);
870
871         spin_unlock(&c->erase_completion_lock);
872
873 outerr:
874         *retlen = 0;
875         up_write(&c->wbuf_sem);
876         return ret;
877 }
878
879 /*
880  *      This is the entry point for flash writes.
881  *      Check if we work on NAND flash; if so, build a kvec and write it via writev.
882  */
883 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
884                       size_t *retlen, const u_char *buf)
885 {
886         struct kvec vecs[1];
887
888         if (!jffs2_is_writebuffered(c))
889                 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
890
891         vecs[0].iov_base = (unsigned char *) buf;
892         vecs[0].iov_len = len;
893         return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
894 }
895
896 /*
897         Handle readback from writebuffer and ECC failure return
898 */
899 int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
900 {
901         loff_t  orbf = 0, owbf = 0, lwbf = 0;
902         int     ret;
903
904         if (!jffs2_is_writebuffered(c))
905                 return c->mtd->read(c->mtd, ofs, len, retlen, buf);
906
907         /* Read flash */
908         down_read(&c->wbuf_sem);
909         ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
910
911         if ( (ret == -EBADMSG) && (*retlen == len) ) {
912                 printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
913                        len, ofs);
914                 /*
915                  * We have the raw data without ECC correction in the buffer; maybe
916                  * we are lucky and all data or parts are correct. We check the node;
917                  * if the data are corrupted, the node check will sort it out.
918                  * We keep this block: it will fail on write or erase and then we
919                  * mark it bad. Or should we do that now? But we should give it a chance.
920                  * Maybe we had a system crash or power loss before the ECC write or
921                  * an erase was completed.
922                  * So we return success. :)
923                  */
924                 ret = 0;
925         }
926
927         /* if no writebuffer available or write buffer empty, return */
928         if (!c->wbuf_pagesize || !c->wbuf_len)
929                 goto exit;
930
931         /* if we read in a different block, return */
932         if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
933                 goto exit;
934
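        /* The read is in the same eraseblock as the write buffer: if it
           overlaps buffered data, patch the affected bytes in 'buf' with the
           newer data still sitting in c->wbuf. */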
935         if (ofs >= c->wbuf_ofs) {
936                 owbf = (ofs - c->wbuf_ofs);     /* offset in write buffer */
937                 if (owbf > c->wbuf_len)         /* is read beyond write buffer ? */
938                         goto exit;
939                 lwbf = c->wbuf_len - owbf;      /* number of bytes to copy */
940                 if (lwbf > len)
941                         lwbf = len;
942         } else {
943                 orbf = (c->wbuf_ofs - ofs);     /* offset in read buffer */
944                 if (orbf > len)                 /* does the read end before the write buffer ? */
945                         goto exit;
946                 lwbf = len - orbf;              /* number of bytes to copy */
947                 if (lwbf > c->wbuf_len)
948                         lwbf = c->wbuf_len;
949         }
950         if (lwbf > 0)
951                 memcpy(buf+orbf,c->wbuf+owbf,lwbf);
952
953 exit:
954         up_read(&c->wbuf_sem);
955         return ret;
956 }
957
958 /*
959  *      Check, if the out of band area is empty
960  */
961 int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
962 {
963         unsigned char *buf;
964         int     ret = 0;
965         int     i,len,page;
966         size_t  retlen;
967         int     oob_size;
968
969         /* allocate a buffer for all oob data in this sector */
970         oob_size = c->mtd->oobsize;
971         len = 4 * oob_size;
972         buf = kmalloc(len, GFP_KERNEL);
973         if (!buf) {
974                 printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
975                 return -ENOMEM;
976         }
977         /*
978          * If mode = 0, we scan for a totally empty oob area; else we have
979          * to take care of the cleanmarker in the first page of the block.
980          */
981         ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf);
982         if (ret) {
983                 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
984                 goto out;
985         }
986
987         if (retlen < len) {
988                 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read "
989                           "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
990                 ret = -EIO;
991                 goto out;
992         }
993
994         /* Special check for first page */
995         for(i = 0; i < oob_size ; i++) {
996                 /* Yeah, we know about the cleanmarker. */
997                 if (mode && i >= c->fsdata_pos &&
998                     i < c->fsdata_pos + c->fsdata_len)
999                         continue;
1000
1001                 if (buf[i] != 0xFF) {
1002                         D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
1003                                   buf[i], i, jeb->offset));
1004                         ret = 1;
1005                         goto out;
1006                 }
1007         }
1008
1009         /* we know, we are aligned :) */
1010         for (page = oob_size; page < len; page += sizeof(long)) {
1011                 unsigned long dat = *(unsigned long *)(&buf[page]);
1012                 if(dat != -1) {
1013                         ret = 1;
1014                         goto out;
1015                 }
1016         }
1017
1018 out:
1019         kfree(buf);
1020
1021         return ret;
1022 }
1023
1024 /*
1025  *      Scan for a valid cleanmarker and for bad blocks.
1026  *      For virtual blocks (concatenated physical blocks) check the cleanmarker
1027  *      only in the first page of the first physical block, but scan for bad
1028  *      blocks in all physical blocks.
1029  */
1030 int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
1031 {
1032         struct jffs2_unknown_node n;
1033         unsigned char buf[2 * NAND_MAX_OOBSIZE];
1034         unsigned char *p;
1035         int ret, i, cnt, retval = 0;
1036         size_t retlen, offset;
1037         int oob_size;
1038
1039         offset = jeb->offset;
1040         oob_size = c->mtd->oobsize;
1041
1042         /* Loop through the physical blocks */
1043         for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
1044                 /* Check first if the block is bad. */
1045                 if (c->mtd->block_isbad (c->mtd, offset)) {
1046                         D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
1047                         return 2;
1048                 }
1049                 /*
1050                  * We read oob data from pages 0 and 1 of the block.
1051                  * Page 0 contains the cleanmarker and badblock info;
1052                  * page 1 contains the failure count of this block.
1053                  */
1054                 ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);
1055
1056                 if (ret) {
1057                         D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
1058                         return ret;
1059                 }
1060                 if (retlen < (oob_size << 1)) {
1061                         D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
1062                         return -EIO;
1063                 }
1064
1065                 /* Check cleanmarker only on the first physical block */
1066                 if (!cnt) {
1067                         n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
1068                         n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
1069                         n.totlen = cpu_to_je32 (8);
1070                         p = (unsigned char *) &n;
1071
1072                         for (i = 0; i < c->fsdata_len; i++) {
1073                                 if (buf[c->fsdata_pos + i] != p[i]) {
1074                                         retval = 1;
1075                                 }
1076                         }
1077                         D1(if (retval == 1) {
1078                                 printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
1079                                 printk(KERN_WARNING "OOB at %08zx was ", offset);
1080                                 for (i=0; i < oob_size; i++) {
1081                                         printk("%02x ", buf[i]);
1082                                 }
1083                                 printk("\n");
1084                         })
1085                 }
1086                 offset += c->mtd->erasesize;
1087         }
1088         return retval;
1089 }
1090
1091 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
1092 {
1093         struct  jffs2_unknown_node n;
1094         int     ret;
1095         size_t  retlen;
1096
1097         n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
1098         n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
1099         n.totlen = cpu_to_je32(8);
1100
1101         ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);
1102
1103         if (ret) {
1104                 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1105                 return ret;
1106         }
1107         if (retlen != c->fsdata_len) {
1108                 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
1109                 return ret;
1110         }
1111         return 0;
1112 }
1113
1114 /*
1115  * On NAND we try to mark this block bad. Once erasing it has failed
1116  * MAX_ERASE_FAILURES times, we finally mark it bad.
1117  * Don't care about failures. This block remains on the erase-pending
1118  * or badblock list as long as nobody manipulates the flash with
1119  * a bootloader or something like that.
1120  */
1121
1122 int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1123 {
1124         int     ret;
1125
1126         /* if the count is < max, we try to write the counter to the 2nd page oob area */
1127         if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1128                 return 0;
1129
1130         if (!c->mtd->block_markbad)
1131                 return 1; // What else can we do?
1132
1133         D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
1134         ret = c->mtd->block_markbad(c->mtd, bad_offset);
1135
1136         if (ret) {
1137                 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1138                 return ret;
1139         }
1140         return 1;
1141 }
1142
1143 #define NAND_JFFS2_OOB16_FSDALEN        8
1144
1145 static struct nand_oobinfo jffs2_oobinfo_docecc = {
1146         .useecc = MTD_NANDECC_PLACE,
1147         .eccbytes = 6,
1148         .eccpos = {0,1,2,3,4,5}
1149 };
1150
1151
1152 static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
1153 {
1154         struct nand_oobinfo *oinfo = c->mtd->oobinfo;
1155
1156         /* Do this only if we have an oob buffer */
1157         if (!c->mtd->oobsize)
1158                 return 0;
1159
1160         /* Cleanmarker is out-of-band, so inline size zero */
1161         c->cleanmarker_size = 0;
1162
1163         /* Should we use autoplacement ? */
1164         if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
1165                 D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
1166                 /* Get the position of the free bytes */
1167                 if (!oinfo->oobfree[0][1]) {
1168                         printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
1169                         return -ENOSPC;
1170                 }
1171                 c->fsdata_pos = oinfo->oobfree[0][0];
1172                 c->fsdata_len = oinfo->oobfree[0][1];
1173                 if (c->fsdata_len > 8)
1174                         c->fsdata_len = 8;
1175         } else {
1176                 /* This is just a legacy fallback and should go away soon */
1177                 switch(c->mtd->ecctype) {
1178                 case MTD_ECC_RS_DiskOnChip:
1179                         printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
1180                         c->oobinfo = &jffs2_oobinfo_docecc;
1181                         c->fsdata_pos = 6;
1182                         c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
1183                         c->badblock_pos = 15;
1184                         break;
1185
1186                 default:
1187                         D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
1188                         return -EINVAL;
1189                 }
1190         }
1191         return 0;
1192 }
1193
1194 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1195 {
1196         int res;
1197
1198         /* Initialise write buffer */
1199         init_rwsem(&c->wbuf_sem);
1200         c->wbuf_pagesize = c->mtd->writesize;
1201         c->wbuf_ofs = 0xFFFFFFFF;
1202
1203         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1204         if (!c->wbuf)
1205                 return -ENOMEM;
1206
1207         res = jffs2_nand_set_oobinfo(c);
1208
1209 #ifdef BREAKME
1210         if (!brokenbuf)
1211                 brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1212         if (!brokenbuf) {
1213                 kfree(c->wbuf);
1214                 return -ENOMEM;
1215         }
1216         memset(brokenbuf, 0xdb, c->wbuf_pagesize);
1217 #endif
1218         return res;
1219 }
1220
1221 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1222 {
1223         kfree(c->wbuf);
1224 }
1225
1226 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1227         c->cleanmarker_size = 0;                /* No cleanmarkers needed */
1228
1229         /* Initialize write buffer */
1230         init_rwsem(&c->wbuf_sem);
1231
1232
1233         c->wbuf_pagesize =  c->mtd->erasesize;
1234
1235         /* Find a suitable c->sector_size
1236          * - Not too many sectors
1237          * - Sectors have to be at least 4K + some bytes
1238          * - All known dataflashes have erase sizes of 528 or 1056 bytes
1239          * - We take at least 8 eraseblocks and want at least 8K total size
1240          * - The concatenation should be a power of 2
1241          */
1242
1243         c->sector_size = 8 * c->mtd->erasesize;
1244
1245         while (c->sector_size < 8192) {
1246                 c->sector_size *= 2;
1247         }
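        /* For example, an erase size of 528 bytes starts at 8 * 528 = 4224 and
           is doubled once to 8448 (16 blocks); an erase size of 1056 bytes
           gives 8 * 1056 = 8448 straight away (8 blocks). */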
1248
1249         /* It may be necessary to adjust the flash size */
1250         c->flash_size = c->mtd->size;
1251
1252         if ((c->flash_size % c->sector_size) != 0) {
1253                 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1254                 printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size / 1024);
1255         }
1256
1257         c->wbuf_ofs = 0xFFFFFFFF;
1258         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1259         if (!c->wbuf)
1260                 return -ENOMEM;
1261
1262         printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
1263
1264         return 0;
1265 }
1266
1267 void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1268         kfree(c->wbuf);
1269 }
1270
1271 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1272         /* Cleanmarker currently occupies whole programming regions,
1273          * either one or two for 8-byte STMicro flashes. */
1274         c->cleanmarker_size = max(16u, c->mtd->writesize);
1275
1276         /* Initialize write buffer */
1277         init_rwsem(&c->wbuf_sem);
1278         c->wbuf_pagesize = c->mtd->writesize;
1279         c->wbuf_ofs = 0xFFFFFFFF;
1280
1281         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1282         if (!c->wbuf)
1283                 return -ENOMEM;
1284
1285         return 0;
1286 }
1287
1288 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1289         kfree(c->wbuf);
1290 }