/*
 * MTD Oops/Panic logger
 *
 * Copyright (C) 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>

/* Maximum MTD partition size */
#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)

#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00

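/*
 * On-flash record layout: each record occupies one record_size slot and
 * begins with an 8-byte header - a 32-bit sequence counter followed by
 * MTDOOPS_KERNMSG_MAGIC.  The rest of the slot holds console text, padded
 * with 0xff (the erased state of flash) before it is written out.
 */
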
static unsigned long record_size = 4096;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
                "record size for MTD OOPS pages in bytes (default 4096)");

static struct mtdoops_context {
        int mtd_index;
        struct work_struct work_erase;
        struct work_struct work_write;
        struct mtd_info *mtd;
        int oops_pages;
        int nextpage;
        int nextcount;
        unsigned long *oops_page_used;
        char *name;

        void *oops_buf;

        /* writecount and disabling ready are spin lock protected */
        spinlock_t writecount_lock;
        int ready;
        int writecount;
} oops_cxt;

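/*
 * oops_page_used is a bitmap with one bit per record slot; a set bit
 * means the slot holds data and must be erased before it can be reused.
 */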
static void mark_page_used(struct mtdoops_context *cxt, int page)
{
        set_bit(page, cxt->oops_page_used);
}

static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
        clear_bit(page, cxt->oops_page_used);
}

static int page_is_used(struct mtdoops_context *cxt, int page)
{
        return test_bit(page, cxt->oops_page_used);
}

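/*
 * Erase completion callback: the waitqueue head is passed through
 * erase_info->priv so the erasing thread can sleep until the MTD driver
 * signals completion.
 */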
static void mtdoops_erase_callback(struct erase_info *done)
{
        wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
        wake_up(wait_q);
}

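/*
 * Synchronously erase the eraseblock containing @offset, then mark every
 * record slot inside it as unused.
 */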
static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
        struct mtd_info *mtd = cxt->mtd;
        u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
        u32 start_page = start_page_offset / record_size;
        u32 erase_pages = mtd->erasesize / record_size;
        struct erase_info erase;
        DECLARE_WAITQUEUE(wait, current);
        wait_queue_head_t wait_q;
        int ret;
        int page;

        init_waitqueue_head(&wait_q);
        erase.mtd = mtd;
        erase.callback = mtdoops_erase_callback;
        erase.addr = offset;
        erase.len = mtd->erasesize;
        erase.priv = (u_long)&wait_q;

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&wait_q, &wait);

        ret = mtd->erase(mtd, &erase);
        if (ret) {
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&wait_q, &wait);
                printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
                       (unsigned long long)erase.addr,
                       (unsigned long long)erase.len, mtd->name);
                return ret;
        }

        schedule();  /* Wait for erase to finish. */
        remove_wait_queue(&wait_q, &wait);

        /* Mark pages as unused */
        for (page = start_page; page < start_page + erase_pages; page++)
                mark_page_unused(cxt, page);

        return 0;
}

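/*
 * Advance to the next record slot, wrapping at the end of the device.
 * If the new slot still holds old data, defer to the erase work item;
 * otherwise the context is immediately ready for the next oops.
 */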
static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
        cxt->nextpage++;
        if (cxt->nextpage >= cxt->oops_pages)
                cxt->nextpage = 0;
        cxt->nextcount++;
        if (cxt->nextcount == 0xffffffff)
                cxt->nextcount = 0;

        if (page_is_used(cxt, cxt->nextpage)) {
                schedule_work(&cxt->work_erase);
                return;
        }

        printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
               cxt->nextpage, cxt->nextcount);
        cxt->ready = 1;
}

/*
 * Scheduled work - when we can't proceed without erasing a block.  Aligns
 * nextpage to an eraseblock boundary, skips bad blocks, retries a failed
 * erase up to three times, and marks the block bad on persistent -EIO.
 */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
        struct mtdoops_context *cxt =
                        container_of(work, struct mtdoops_context, work_erase);
        struct mtd_info *mtd = cxt->mtd;
        int i = 0, j, ret, mod;

        /* We were unregistered */
        if (!mtd)
                return;

        mod = (cxt->nextpage * record_size) % mtd->erasesize;
        if (mod != 0) {
                cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
                if (cxt->nextpage >= cxt->oops_pages)
                        cxt->nextpage = 0;
        }

        while (mtd->block_isbad) {
                ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
                if (!ret)
                        break;
                if (ret < 0) {
                        printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
                        return;
                }
badblock:
                printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
                       cxt->nextpage * record_size);
                i++;
                cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
                if (cxt->nextpage >= cxt->oops_pages)
                        cxt->nextpage = 0;
                if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
                        printk(KERN_ERR "mtdoops: all blocks bad!\n");
                        return;
                }
        }

        for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
                ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);

        if (ret >= 0) {
                printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
                       cxt->nextpage, cxt->nextcount);
                cxt->ready = 1;
                return;
        }

        if (mtd->block_markbad && ret == -EIO) {
                ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
                if (ret < 0) {
                        printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
                        return;
                }
        }
        goto badblock;
}

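/*
 * Flush the accumulated record to flash.  The unused tail of the buffer is
 * padded with 0xff first.  In the @panic case we must not sleep, so the
 * MTD panic_write() method is used instead of the normal write().
 */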
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
        struct mtd_info *mtd = cxt->mtd;
        size_t retlen;
        int ret;

        if (cxt->writecount < record_size)
                memset(cxt->oops_buf + cxt->writecount, 0xff,
                                        record_size - cxt->writecount);

        if (panic)
                ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
                                        record_size, &retlen, cxt->oops_buf);
        else
                ret = mtd->write(mtd, cxt->nextpage * record_size,
                                        record_size, &retlen, cxt->oops_buf);

        cxt->writecount = 0;

        if (retlen != record_size || ret < 0)
                printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
                       cxt->nextpage * record_size, retlen, record_size, ret);
        mark_page_used(cxt, cxt->nextpage);

        mtdoops_inc_counter(cxt);
}
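/* Scheduled work - deferred write of a record from non-atomic context */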
static void mtdoops_workfunc_write(struct work_struct *work)
{
        struct mtdoops_context *cxt =
                        container_of(work, struct mtdoops_context, work_write);

        mtdoops_write(cxt, 0);
}

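/*
 * Scan every record header on the device to find the most recent record
 * and resume numbering after it.  The sequence counter wraps at 0xffffffff,
 * so the comparisons below treat a small count as newer than a very large
 * one when both extremes are present.
 */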
static void find_next_position(struct mtdoops_context *cxt)
{
        struct mtd_info *mtd = cxt->mtd;
        int ret, page, maxpos = 0;
        u32 count[2], maxcount = 0xffffffff;
        size_t retlen;

        for (page = 0; page < cxt->oops_pages; page++) {
                /* Assume the page is used */
                mark_page_used(cxt, page);
                ret = mtd->read(mtd, page * record_size, 8, &retlen, (u_char *) &count[0]);
                if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) {
                        printk(KERN_ERR "mtdoops: read failure at %ld (%td of 8 read), err %d\n",
                               page * record_size, retlen, ret);
                        continue;
                }

                if (count[0] == 0xffffffff && count[1] == 0xffffffff)
                        mark_page_unused(cxt, page);
                if (count[1] != MTDOOPS_KERNMSG_MAGIC)
                        continue;
                if (count[0] == 0xffffffff)
                        continue;
                if (maxcount == 0xffffffff) {
                        maxcount = count[0];
                        maxpos = page;
                } else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
                        maxcount = count[0];
                        maxpos = page;
                } else if (count[0] > maxcount && count[0] < 0xc0000000) {
                        maxcount = count[0];
                        maxpos = page;
                } else if (count[0] > maxcount && count[0] > 0xc0000000
                                        && maxcount > 0x80000000) {
                        maxcount = count[0];
                        maxpos = page;
                }
        }
        if (maxcount == 0xffffffff) {
                cxt->nextpage = 0;
                cxt->nextcount = 1;
                schedule_work(&cxt->work_erase);
                return;
        }

        cxt->nextpage = maxpos;
        cxt->nextcount = maxcount;

        mtdoops_inc_counter(cxt);
}

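/*
 * MTD notifier add hook: called for every MTD device in the system; we
 * attach once the device matching the configured index or name appears,
 * after sanity-checking its size and eraseblock geometry.
 */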
static void mtdoops_notify_add(struct mtd_info *mtd)
{
        struct mtdoops_context *cxt = &oops_cxt;
        u64 mtdoops_pages = mtd->size;

        do_div(mtdoops_pages, record_size);

        if (cxt->name && !strcmp(mtd->name, cxt->name))
                cxt->mtd_index = mtd->index;

        if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
                return;

        if (mtd->size < mtd->erasesize * 2) {
                printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
                       mtd->index);
                return;
        }

        if (mtd->erasesize < record_size) {
                printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
                       mtd->index);
                return;
        }

        if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
                printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
                       mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
                return;
        }

        /* oops_page_used is a bitmap: one bit per record slot */
        cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
                        BITS_PER_LONG) * sizeof(unsigned long));
        if (!cxt->oops_page_used) {
                printk(KERN_ERR "mtdoops: could not allocate page array\n");
                return;
        }

        cxt->mtd = mtd;
        cxt->oops_pages = (int)mtd->size / record_size;
        find_next_position(cxt);
        printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}

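/*
 * MTD notifier remove hook: detach from the device and flush any queued
 * erase/write work before returning.
 */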
static void mtdoops_notify_remove(struct mtd_info *mtd)
{
        struct mtdoops_context *cxt = &oops_cxt;

        if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
                return;

        cxt->mtd = NULL;
        flush_scheduled_work();
}

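/*
 * Push any partially filled record out to flash.  Clearing cxt->ready
 * under the lock fences off further console writes into the buffer; in
 * interrupt context (i.e. while panicking) the record is written
 * synchronously via panic_write(), otherwise the write is deferred to
 * the workqueue.
 */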
static void mtdoops_console_sync(void)
{
        struct mtdoops_context *cxt = &oops_cxt;
        struct mtd_info *mtd = cxt->mtd;
        unsigned long flags;

        if (!cxt->ready || !mtd || cxt->writecount == 0)
                return;

        /*
         *  Once ready is 0 and we've held the lock no further writes to the
         *  buffer will happen
         */
        spin_lock_irqsave(&cxt->writecount_lock, flags);
        if (!cxt->ready) {
                spin_unlock_irqrestore(&cxt->writecount_lock, flags);
                return;
        }
        cxt->ready = 0;
        spin_unlock_irqrestore(&cxt->writecount_lock, flags);

        if (mtd->panic_write && in_interrupt())
                /* Interrupt context, we're going to panic so try and log */
                mtdoops_write(cxt, 1);
        else
                schedule_work(&cxt->work_write);
}

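/*
 * Console write hook.  Outside of an oops it only triggers a sync of
 * whatever is buffered; during an oops it stamps a fresh record header on
 * first use and appends the message text, syncing once the record slot
 * is full.
 */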
static void
mtdoops_console_write(struct console *co, const char *s, unsigned int count)
{
        struct mtdoops_context *cxt = co->data;
        struct mtd_info *mtd = cxt->mtd;
        unsigned long flags;

        if (!oops_in_progress) {
                mtdoops_console_sync();
                return;
        }

        if (!cxt->ready || !mtd)
                return;

        /* Locking on writecount ensures sequential writes to the buffer */
        spin_lock_irqsave(&cxt->writecount_lock, flags);

        /* Check ready status didn't change whilst waiting for the lock */
        if (!cxt->ready) {
                spin_unlock_irqrestore(&cxt->writecount_lock, flags);
                return;
        }

        if (cxt->writecount == 0) {
                u32 *stamp = cxt->oops_buf;
                *stamp++ = cxt->nextcount;
                *stamp = MTDOOPS_KERNMSG_MAGIC;
                cxt->writecount = 8;
        }

        if (count + cxt->writecount > record_size)
                count = record_size - cxt->writecount;

        memcpy(cxt->oops_buf + cxt->writecount, s, count);
        cxt->writecount += count;

        spin_unlock_irqrestore(&cxt->writecount_lock, flags);

        if (cxt->writecount == record_size)
                mtdoops_console_sync();
}

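/*
 * Console setup hook, invoked when mtdoops is selected on the kernel
 * command line.  The target partition can be given by number or by name,
 * e.g. (illustrative values):
 *
 *     console=ttyMTD2        - attach to mtd2
 *     console=ttyMTD,oops    - attach to the partition named "oops"
 *
 * A name passed as the option string takes effect when the matching MTD
 * device is later registered.
 */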
static int __init mtdoops_console_setup(struct console *co, char *options)
{
        struct mtdoops_context *cxt = co->data;

        if (cxt->mtd_index != -1 || cxt->name)
                return -EBUSY;
        if (options) {
                cxt->name = kstrdup(options, GFP_KERNEL);
                return 0;
        }
        if (co->index == -1)
                return -EINVAL;

        cxt->mtd_index = co->index;
        return 0;
}

static struct mtd_notifier mtdoops_notifier = {
        .add    = mtdoops_notify_add,
        .remove = mtdoops_notify_remove,
};

static struct console mtdoops_console = {
        .name           = "ttyMTD",
        .write          = mtdoops_console_write,
        .setup          = mtdoops_console_setup,
        .unblank        = mtdoops_console_sync,
        .index          = -1,
        .data           = &oops_cxt,
};

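/*
 * Module init: validate record_size (a multiple of 4096, minimum 4096),
 * allocate the record buffer, then register the console and the MTD user
 * notifier so we attach whenever the flash device shows up.
 */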
static int __init mtdoops_console_init(void)
{
        struct mtdoops_context *cxt = &oops_cxt;

        if ((record_size & 4095) != 0) {
                printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
                return -EINVAL;
        }
        if (record_size < 4096) {
                printk(KERN_ERR "mtdoops: record_size must be at least 4096 bytes\n");
                return -EINVAL;
        }
        cxt->mtd_index = -1;
        cxt->oops_buf = vmalloc(record_size);
        if (!cxt->oops_buf) {
                printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
                return -ENOMEM;
        }

        spin_lock_init(&cxt->writecount_lock);
        INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
        INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

        register_console(&mtdoops_console);
        register_mtd_user(&mtdoops_notifier);
        return 0;
}

static void __exit mtdoops_console_exit(void)
{
        struct mtdoops_context *cxt = &oops_cxt;

        unregister_mtd_user(&mtdoops_notifier);
        unregister_console(&mtdoops_console);
        kfree(cxt->name);
        vfree(cxt->oops_buf);
        vfree(cxt->oops_page_used);
}

subsys_initcall(mtdoops_console_init);
module_exit(mtdoops_console_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");