/*
 * MTD Oops/Panic logger
 *
 * Copyright (C) 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>

#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
#define OOPS_PAGE_SIZE 4096

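/*
 * On-flash record layout, as stamped by mtdoops_console_write() and
 * scanned by find_next_position(): each OOPS_PAGE_SIZE page starts
 * with a 32-bit sequence counter followed by MTDOOPS_KERNMSG_MAGIC,
 * then the raw console text, padded to the end of the page with 0xff.
 * A leading word of 0xffffffff marks an erased, unused page.
 */
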
static struct mtdoops_context {
	int mtd_index;
	struct work_struct work_erase;
	struct work_struct work_write;
	struct mtd_info *mtd;
	int oops_pages;
	int nextpage;
	int nextcount;
	char *name;

	void *oops_buf;

	/* writecount and disabling ready are spin lock protected */
	spinlock_t writecount_lock;
	int ready;
	int writecount;
} oops_cxt;

static void mtdoops_erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
	wake_up(wait_q);
}

static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
{
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	int ret;

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = mtdoops_erase_callback;
	erase.addr = offset;
	erase.len = mtd->erasesize;
	erase.priv = (u_long)&wait_q;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = mtd->erase(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
		       (unsigned long long)erase.addr,
		       (unsigned long long)erase.len, mtd->name);
		return ret;
	}

	schedule();  /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	return 0;
}

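/*
 * Note the ordering above: the task marks itself TASK_INTERRUPTIBLE
 * and joins the wait queue *before* issuing the erase, so a callback
 * that fires immediately cannot be missed; schedule() then sleeps
 * until mtdoops_erase_callback() wakes the queue.
 */
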
static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	u32 count;
	int ret;

	cxt->nextpage++;
	if (cxt->nextpage >= cxt->oops_pages)
		cxt->nextpage = 0;
	cxt->nextcount++;
	if (cxt->nextcount == 0xffffffff)
		cxt->nextcount = 0;

	ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
			&retlen, (u_char *) &count);
	if (retlen != 4 || (ret < 0 && ret != -EUCLEAN)) {
		printk(KERN_ERR "mtdoops: read failure at %d (%td of 4 read), err %d\n",
		       cxt->nextpage * OOPS_PAGE_SIZE, retlen, ret);
		schedule_work(&cxt->work_erase);
		return;
	}

	/* See if we need to erase the next block */
	if (count != 0xffffffff) {
		schedule_work(&cxt->work_erase);
		return;
	}

	printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
	       cxt->nextpage, cxt->nextcount);
	cxt->ready = 1;
}

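/*
 * A first word other than 0xffffffff means the next page still holds
 * an old record, so its eraseblock must be erased from process
 * context before reuse; only an already-erased page lets the driver
 * declare itself ready without scheduling work_erase.
 */
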
/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_erase);
	struct mtd_info *mtd = cxt->mtd;
	int i = 0, j, ret, mod;

	/* We were unregistered */
	if (!mtd)
		return;

	mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
	if (mod != 0) {
		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
	}

	while (mtd->block_isbad) {
		ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
		if (!ret)
			break;
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
			return;
		}
badblock:
		printk(KERN_WARNING "mtdoops: bad block at %08x\n",
		       cxt->nextpage * OOPS_PAGE_SIZE);
		i++;
		cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
		if (i == cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE)) {
			printk(KERN_ERR "mtdoops: all blocks bad!\n");
			return;
		}
	}

	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
		ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);

	if (ret >= 0) {
		printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
		       cxt->nextpage, cxt->nextcount);
		cxt->ready = 1;
		return;
	}

	if (mtd->block_markbad && ret == -EIO) {
		ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
			return;
		}
	}
	goto badblock;
}

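/*
 * Bad-block handling: each candidate eraseblock is checked with
 * block_isbad() and skipped if marked bad; a block that still fails
 * with -EIO after three erase attempts is marked bad via
 * block_markbad() and the scan moves on. Skipping as many blocks as
 * the partition holds means every block is bad, and logging stops.
 */
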
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	int ret;

	if (cxt->writecount < OOPS_PAGE_SIZE)
		memset(cxt->oops_buf + cxt->writecount, 0xff,
					OOPS_PAGE_SIZE - cxt->writecount);

	if (panic)
		ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
	else
		ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);

	cxt->writecount = 0;

	if (retlen != OOPS_PAGE_SIZE || ret < 0)
		printk(KERN_ERR "mtdoops: write failure at %d (%td of %d written), error %d\n",
		       cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);

	mtdoops_inc_counter(cxt);
}

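/*
 * The panic path cannot sleep or schedule, so it goes through
 * mtd->panic_write(); the ordinary path uses mtd->write() from the
 * workqueue. Either way the buffer is padded with 0xff first, so any
 * unused tail of the page still reads back as erased flash.
 */
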
static void mtdoops_workfunc_write(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_write);

	mtdoops_write(cxt, 0);
}

static void find_next_position(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	int ret, page, maxpos = 0;
	u32 count[2], maxcount = 0xffffffff;
	size_t retlen;

	for (page = 0; page < cxt->oops_pages; page++) {
		ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]);
		if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) {
			printk(KERN_ERR "mtdoops: read failure at %d (%td of 8 read), err %d\n",
			       page * OOPS_PAGE_SIZE, retlen, ret);
			continue;
		}

		if (count[1] != MTDOOPS_KERNMSG_MAGIC)
			continue;
		if (count[0] == 0xffffffff)
			continue;
		if (maxcount == 0xffffffff) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] < 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] > 0xc0000000
					&& maxcount > 0x80000000) {
			maxcount = count[0];
			maxpos = page;
		}
	}
	if (maxcount == 0xffffffff) {
		cxt->nextpage = 0;
		cxt->nextcount = 1;
		schedule_work(&cxt->work_erase);
		return;
	}

	cxt->nextpage = maxpos;
	cxt->nextcount = maxcount;

	mtdoops_inc_counter(cxt);
}

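/*
 * The sequence counter wraps before reaching 0xffffffff, so "largest
 * count" is decided modulo 2^32: the extra range comparisons treat a
 * small count as newer than a very large one (e.g. a page stamped
 * 0x00000001 beats one stamped 0xfffffff0), picking the most recent
 * record correctly even just after wrap-around.
 */
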
static void mtdoops_notify_add(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;

	if (cxt->name && !strcmp(mtd->name, cxt->name))
		cxt->mtd_index = mtd->index;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	if (mtd->size < mtd->erasesize * 2) {
		printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
		       mtd->index);
		return;
	}

	if (mtd->erasesize < OOPS_PAGE_SIZE) {
		printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
		       mtd->index);
		return;
	}

	cxt->mtd = mtd;
	if (mtd->size > INT_MAX)
		cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE;
	else
		cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;

	find_next_position(cxt);

	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}

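/*
 * The size checks above enforce a partition of at least two
 * eraseblocks (so one block can be erased while another still holds
 * the latest record) and an eraseblock large enough to hold at least
 * one OOPS_PAGE_SIZE record.
 */
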
static void mtdoops_notify_remove(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	cxt->mtd = NULL;
	flush_scheduled_work();
}

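/*
 * cxt->mtd is cleared first so the erase worker's "we were
 * unregistered" check fires, then flush_scheduled_work() waits for
 * any already-queued work to finish before the device goes away.
 */
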
static void mtdoops_console_sync(void)
{
	struct mtdoops_context *cxt = &oops_cxt;
	struct mtd_info *mtd = cxt->mtd;
	unsigned long flags;

	if (!cxt->ready || !mtd || cxt->writecount == 0)
		return;

	/*
	 * Once ready is 0 and we've held the lock no further writes to the
	 * buffer will happen
	 */
	spin_lock_irqsave(&cxt->writecount_lock, flags);
	if (!cxt->ready) {
		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
		return;
	}
	cxt->ready = 0;
	spin_unlock_irqrestore(&cxt->writecount_lock, flags);

	if (mtd->panic_write && in_interrupt())
		/* Interrupt context, we're going to panic so try and log */
		mtdoops_write(cxt, 1);
	else
		schedule_work(&cxt->work_write);
}

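/*
 * Clearing cxt->ready under writecount_lock is what freezes the
 * buffer: mtdoops_console_write() rechecks ready after taking the
 * same lock, so once sync has claimed the buffer no more bytes can be
 * appended until the flash write completes and ready is set again.
 */
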
static void
mtdoops_console_write(struct console *co, const char *s, unsigned int count)
{
	struct mtdoops_context *cxt = co->data;
	struct mtd_info *mtd = cxt->mtd;
	unsigned long flags;

	if (!oops_in_progress) {
		mtdoops_console_sync();
		return;
	}

	if (!cxt->ready || !mtd)
		return;

	/* Locking on writecount ensures sequential writes to the buffer */
	spin_lock_irqsave(&cxt->writecount_lock, flags);

	/* Check ready status didn't change whilst waiting for the lock */
	if (!cxt->ready) {
		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
		return;
	}

	if (cxt->writecount == 0) {
		u32 *stamp = cxt->oops_buf;
		*stamp++ = cxt->nextcount;
		*stamp = MTDOOPS_KERNMSG_MAGIC;
		cxt->writecount = 8;
	}

	if (count + cxt->writecount > OOPS_PAGE_SIZE)
		count = OOPS_PAGE_SIZE - cxt->writecount;

	memcpy(cxt->oops_buf + cxt->writecount, s, count);
	cxt->writecount += count;

	spin_unlock_irqrestore(&cxt->writecount_lock, flags);

	if (cxt->writecount == OOPS_PAGE_SIZE)
		mtdoops_console_sync();
}

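/*
 * The 8-byte counter+magic stamp consumes the start of the page, so
 * at most OOPS_PAGE_SIZE - 8 bytes of console text fit per record;
 * anything beyond that is truncated, and a page that fills completely
 * is synced out to flash immediately.
 */
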
static int __init mtdoops_console_setup(struct console *co, char *options)
{
	struct mtdoops_context *cxt = co->data;

	if (cxt->mtd_index != -1 || cxt->name)
		return -EBUSY;
	if (options) {
		cxt->name = kstrdup(options, GFP_KERNEL);
		if (!cxt->name)
			return -ENOMEM;
	}
	if (co->index == -1)
		return -EINVAL;

	cxt->mtd_index = co->index;
	return 0;
}

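/*
 * The target partition is chosen on the kernel command line, either
 * by MTD device number via the console index (e.g. console=ttyMTD4)
 * or, when an options string is supplied, by partition name: the
 * string is duplicated into cxt->name and matched against mtd->name
 * in mtdoops_notify_add().
 */
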
static struct mtd_notifier mtdoops_notifier = {
	.add	= mtdoops_notify_add,
	.remove	= mtdoops_notify_remove,
};

static struct console mtdoops_console = {
	.name		= "ttyMTD",
	.write		= mtdoops_console_write,
	.setup		= mtdoops_console_setup,
	.unblank	= mtdoops_console_sync,
	.index		= -1,
	.data		= &oops_cxt,
};

static int __init mtdoops_console_init(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	cxt->mtd_index = -1;
	cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
	if (!cxt->oops_buf) {
		printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
		return -ENOMEM;
	}

	spin_lock_init(&cxt->writecount_lock);
	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

	register_console(&mtdoops_console);
	register_mtd_user(&mtdoops_notifier);
	return 0;
}

static void __exit mtdoops_console_exit(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	unregister_mtd_user(&mtdoops_notifier);
	unregister_console(&mtdoops_console);
	kfree(cxt->name);
	vfree(cxt->oops_buf);
}

subsys_initcall(mtdoops_console_init);
module_exit(mtdoops_console_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");