kgdb: Add the ability to schedule a breakpoint via a tasklet
kernel/debug/debug_core.c
1 /*
2  * Kernel Debug Core
3  *
4  * Maintainer: Jason Wessel <jason.wessel@windriver.com>
5  *
6  * Copyright (C) 2000-2001 VERITAS Software Corporation.
7  * Copyright (C) 2002-2004 Timesys Corporation
8  * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
9  * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
10  * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
11  * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
12  * Copyright (C) 2005-2009 Wind River Systems, Inc.
13  * Copyright (C) 2007 MontaVista Software, Inc.
14  * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
15  *
16  * Contributors at various stages not listed above:
17  *  Jason Wessel ( jason.wessel@windriver.com )
18  *  George Anzinger <george@mvista.com>
19  *  Anurekh Saxena (anurekh.saxena@timesys.com)
20  *  Lake Stevens Instrument Division (Glenn Engel)
21  *  Jim Kingdon, Cygnus Support.
22  *
23  * Original KGDB stub: David Grothe <dave@gcom.com>,
24  * Tigran Aivazian <tigran@sco.com>
25  *
26  * This file is licensed under the terms of the GNU General Public License
27  * version 2. This program is licensed "as is" without any warranty of any
28  * kind, whether express or implied.
29  */
30 #include <linux/pid_namespace.h>
31 #include <linux/clocksource.h>
32 #include <linux/interrupt.h>
33 #include <linux/spinlock.h>
34 #include <linux/console.h>
35 #include <linux/threads.h>
36 #include <linux/uaccess.h>
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/ptrace.h>
40 #include <linux/string.h>
41 #include <linux/delay.h>
42 #include <linux/sched.h>
43 #include <linux/sysrq.h>
44 #include <linux/init.h>
45 #include <linux/kgdb.h>
46 #include <linux/kdb.h>
47 #include <linux/pid.h>
48 #include <linux/smp.h>
49 #include <linux/mm.h>
50
51 #include <asm/cacheflush.h>
52 #include <asm/byteorder.h>
53 #include <asm/atomic.h>
54 #include <asm/system.h>
55
56 #include "debug_core.h"
57
58 static int kgdb_break_asap;
59
60 struct debuggerinfo_struct kgdb_info[NR_CPUS];
61
62 /**
63  * kgdb_connected - Is a host GDB connected to us?
64  */
65 int                             kgdb_connected;
66 EXPORT_SYMBOL_GPL(kgdb_connected);
67
68 /* All the KGDB handlers are installed */
69 int                     kgdb_io_module_registered;
70
71 /* Guard for recursive entry */
72 static int                      exception_level;
73
74 struct kgdb_io          *dbg_io_ops;
75 static DEFINE_SPINLOCK(kgdb_registration_lock);
76
77 /* kgdb console driver is loaded */
78 static int kgdb_con_registered;
79 /* determine if kgdb console output should be used */
80 static int kgdb_use_con;
81 /* Next cpu to become the master debug core */
82 int dbg_switch_cpu;
83
84 /* Use kdb or gdbserver mode */
85 int dbg_kdb_mode = 1;
86
87 static int __init opt_kgdb_con(char *str)
88 {
89         kgdb_use_con = 1;
90         return 0;
91 }
92
93 early_param("kgdbcon", opt_kgdb_con);
94
95 module_param(kgdb_use_con, int, 0644);
96
97 /*
98  * Holds information about breakpoints in a kernel. These breakpoints are
99  * added and removed by gdb.
100  */
101 static struct kgdb_bkpt         kgdb_break[KGDB_MAX_BREAKPOINTS] = {
102         [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
103 };
104
105 /*
106  * The CPU# of the active CPU, or -1 if none:
107  */
108 atomic_t                        kgdb_active = ATOMIC_INIT(-1);
109 EXPORT_SYMBOL_GPL(kgdb_active);
110
111 /*
112  * We use NR_CPUS, not PERCPU, in case kgdb is used to debug early
113  * bootup code (which might not have percpu set up yet):
114  */
115 static atomic_t                 passive_cpu_wait[NR_CPUS];
116 static atomic_t                 cpu_in_kgdb[NR_CPUS];
117 static atomic_t                 kgdb_break_tasklet_var;
118 atomic_t                        kgdb_setting_breakpoint;
119
120 struct task_struct              *kgdb_usethread;
121 struct task_struct              *kgdb_contthread;
122
123 int                             kgdb_single_step;
124 static pid_t                    kgdb_sstep_pid;
125
126 /* To keep track of the CPU which is doing the single stepping */
127 atomic_t                        kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
128
129 /*
130  * If you are debugging a situation where the roundup (the gathering
131  * of all the other CPUs) is itself the problem [this should be
132  * extremely rare], use the nokgdbroundup option to skip it. Note that
133  * the other CPUs might then interfere with your debugging context, so
134  * use this with care:
135  */
136 static int kgdb_do_roundup = 1;
137
138 static int __init opt_nokgdbroundup(char *str)
139 {
140         kgdb_do_roundup = 0;
141
142         return 0;
143 }
144
145 early_param("nokgdbroundup", opt_nokgdbroundup);
146
147 /*
148  * Finally, some KGDB code :-)
149  */
150
151 /*
152  * Weak aliases for breakpoint management; they can be
153  * overridden by architectures when needed:
154  */
155 int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
156 {
157         int err;
158
159         err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
160         if (err)
161                 return err;
162
163         return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
164                                   BREAK_INSTR_SIZE);
165 }
166
167 int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
168 {
169         return probe_kernel_write((char *)addr,
170                                   (char *)bundle, BREAK_INSTR_SIZE);
171 }
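
/*
 * The weak helpers above patch in arch_kgdb_ops.gdb_bpt_instr, which each
 * architecture provides.  Below is a rough, illustrative sketch (not part
 * of this file) of such a definition, modeled on the x86 int3 opcode; the
 * exact initializer lives in the architecture's kgdb code.
 */
static struct kgdb_arch example_arch_kgdb_ops = {
        .gdb_bpt_instr  = { 0xcc },             /* trap opcode, here x86 int3 */
        .flags          = KGDB_HW_BREAKPOINT,
};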
172
173 int __weak kgdb_validate_break_address(unsigned long addr)
174 {
175         char tmp_variable[BREAK_INSTR_SIZE];
176         int err;
177         /* Validate setting the breakpoint and then removing it.  If the
178          * remove fails, the kernel needs to emit an error message because
179          * we are in deep trouble if we cannot put things back the way we
180          * found them.
181          */
182         err = kgdb_arch_set_breakpoint(addr, tmp_variable);
183         if (err)
184                 return err;
185         err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
186         if (err)
187                 printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
188                    "memory destroyed at: %lx", addr);
189         return err;
190 }
191
192 unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
193 {
194         return instruction_pointer(regs);
195 }
196
197 int __weak kgdb_arch_init(void)
198 {
199         return 0;
200 }
201
202 int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
203 {
204         return 0;
205 }
206
207 /**
208  *      kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
209  *      @regs: Current &struct pt_regs.
210  *
211  *      This function will be called if the particular architecture must
212  *      disable hardware debugging while it is processing gdb packets or
213  *      handling an exception.
214  */
215 void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
216 {
217 }
218
219 /*
220  * Some architectures need cache flushes when we set/clear a
221  * breakpoint:
222  */
223 static void kgdb_flush_swbreak_addr(unsigned long addr)
224 {
225         if (!CACHE_FLUSH_IS_SAFE)
226                 return;
227
228         if (current->mm && current->mm->mmap_cache) {
229                 flush_cache_range(current->mm->mmap_cache,
230                                   addr, addr + BREAK_INSTR_SIZE);
231         }
232         /* Force flush instruction cache if it was outside the mm */
233         flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
234 }
235
236 /*
237  * SW breakpoint management:
238  */
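/*
 * Breakpoint life cycle, as driven by the helpers below:
 *
 *   BP_UNDEFINED/BP_REMOVED --dbg_set_sw_break()------------> BP_SET
 *   BP_SET                  --dbg_activate_sw_breakpoints()-> BP_ACTIVE
 *   BP_ACTIVE               --dbg_deactivate_sw_breakpoints()-> BP_SET
 *   BP_SET                  --dbg_remove_sw_break()---------> BP_REMOVED
 *   (any state)             --dbg_remove_all_break()--------> BP_UNDEFINED
 */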
239 int dbg_activate_sw_breakpoints(void)
240 {
241         unsigned long addr;
242         int error;
243         int ret = 0;
244         int i;
245
246         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
247                 if (kgdb_break[i].state != BP_SET)
248                         continue;
249
250                 addr = kgdb_break[i].bpt_addr;
251                 error = kgdb_arch_set_breakpoint(addr,
252                                 kgdb_break[i].saved_instr);
253                 if (error) {
254                         ret = error;
255                         printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
256                         continue;
257                 }
258
259                 kgdb_flush_swbreak_addr(addr);
260                 kgdb_break[i].state = BP_ACTIVE;
261         }
262         return ret;
263 }
264
265 int dbg_set_sw_break(unsigned long addr)
266 {
267         int err = kgdb_validate_break_address(addr);
268         int breakno = -1;
269         int i;
270
271         if (err)
272                 return err;
273
274         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
275                 if ((kgdb_break[i].state == BP_SET) &&
276                                         (kgdb_break[i].bpt_addr == addr))
277                         return -EEXIST;
278         }
279         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
280                 if (kgdb_break[i].state == BP_REMOVED &&
281                                         kgdb_break[i].bpt_addr == addr) {
282                         breakno = i;
283                         break;
284                 }
285         }
286
287         if (breakno == -1) {
288                 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
289                         if (kgdb_break[i].state == BP_UNDEFINED) {
290                                 breakno = i;
291                                 break;
292                         }
293                 }
294         }
295
296         if (breakno == -1)
297                 return -E2BIG;
298
299         kgdb_break[breakno].state = BP_SET;
300         kgdb_break[breakno].type = BP_BREAKPOINT;
301         kgdb_break[breakno].bpt_addr = addr;
302
303         return 0;
304 }
305
306 int dbg_deactivate_sw_breakpoints(void)
307 {
308         unsigned long addr;
309         int error;
310         int ret = 0;
311         int i;
312
313         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
314                 if (kgdb_break[i].state != BP_ACTIVE)
315                         continue;
316                 addr = kgdb_break[i].bpt_addr;
317                 error = kgdb_arch_remove_breakpoint(addr,
318                                         kgdb_break[i].saved_instr);
319                 if (error) {
320                         printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
321                         ret = error;
322                 }
323
324                 kgdb_flush_swbreak_addr(addr);
325                 kgdb_break[i].state = BP_SET;
326         }
327         return ret;
328 }
329
330 int dbg_remove_sw_break(unsigned long addr)
331 {
332         int i;
333
334         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
335                 if ((kgdb_break[i].state == BP_SET) &&
336                                 (kgdb_break[i].bpt_addr == addr)) {
337                         kgdb_break[i].state = BP_REMOVED;
338                         return 0;
339                 }
340         }
341         return -ENOENT;
342 }
343
344 int kgdb_isremovedbreak(unsigned long addr)
345 {
346         int i;
347
348         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
349                 if ((kgdb_break[i].state == BP_REMOVED) &&
350                                         (kgdb_break[i].bpt_addr == addr))
351                         return 1;
352         }
353         return 0;
354 }
355
356 int dbg_remove_all_break(void)
357 {
358         unsigned long addr;
359         int error;
360         int i;
361
362         /* Clear memory breakpoints. */
363         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
364                 if (kgdb_break[i].state != BP_ACTIVE)
365                         goto setundefined;
366                 addr = kgdb_break[i].bpt_addr;
367                 error = kgdb_arch_remove_breakpoint(addr,
368                                 kgdb_break[i].saved_instr);
369                 if (error)
370                         printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
371                            addr);
372 setundefined:
373                 kgdb_break[i].state = BP_UNDEFINED;
374         }
375
376         /* Clear hardware breakpoints. */
377         if (arch_kgdb_ops.remove_all_hw_break)
378                 arch_kgdb_ops.remove_all_hw_break();
379
380         return 0;
381 }
382
383 /*
384  * Return true if there is a valid kgdb I/O module.  Also, if no
385  * debugger is attached, a message can be printed to the console about
386  * waiting for the debugger to attach.
387  *
388  * The print_wait argument should only be true when called from inside
389  * the core kgdb_handle_exception(), because that path will wait for
390  * the debugger to attach.
391  */
392 static int kgdb_io_ready(int print_wait)
393 {
394         if (!dbg_io_ops)
395                 return 0;
396         if (kgdb_connected)
397                 return 1;
398         if (atomic_read(&kgdb_setting_breakpoint))
399                 return 1;
400         if (print_wait) {
401 #ifdef CONFIG_KGDB_KDB
402                 if (!dbg_kdb_mode)
403                         printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
404 #else
405                 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
406 #endif
407         }
408         return 1;
409 }
410
411 static int kgdb_reenter_check(struct kgdb_state *ks)
412 {
413         unsigned long addr;
414
415         if (atomic_read(&kgdb_active) != raw_smp_processor_id())
416                 return 0;
417
418         /* Panic on recursive debugger calls: */
419         exception_level++;
420         addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
421         dbg_deactivate_sw_breakpoints();
422
423         /*
424          * If the breakpoint was removed successfully at the place the
425          * exception occurred, try to recover and print a warning to the
426          * end user, because the user planted a breakpoint in a place
427          * that KGDB needs in order to function.
428          */
429         if (dbg_remove_sw_break(addr) == 0) {
430                 exception_level = 0;
431                 kgdb_skipexception(ks->ex_vector, ks->linux_regs);
432                 dbg_activate_sw_breakpoints();
433                 printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
434                         addr);
435                 WARN_ON_ONCE(1);
436
437                 return 1;
438         }
439         dbg_remove_all_break();
440         kgdb_skipexception(ks->ex_vector, ks->linux_regs);
441
442         if (exception_level > 1) {
443                 dump_stack();
444                 panic("Recursive entry to debugger");
445         }
446
447         printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
448         dump_stack();
449         panic("Recursive entry to debugger");
450
451         return 1;
452 }
453
454 static void dbg_cpu_switch(int cpu, int next_cpu)
455 {
456         /* Mark the cpu we are switching away from as a slave when it
457          * holds the kgdb_active token.  This must be done so that the
458          * cpu that all the other cpus are waiting on in the debug core
459          * will not enter again as the master. */
460         if (cpu == atomic_read(&kgdb_active)) {
461                 kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
462                 kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER;
463         }
464         kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER;
465 }
466
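/*
 * kgdb_cpu_enter() is the heart of the debug core: every CPU that takes a
 * debug exception (or is rounded up via kgdb_roundup_cpus()) ends up here.
 * Exactly one CPU wins the kgdb_active token and runs the kdb or gdbstub
 * loop as the master; the others spin on passive_cpu_wait[] until the
 * master releases them.
 */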
467 static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
468 {
469         unsigned long flags;
470         int sstep_tries = 100;
471         int error;
472         int i, cpu;
473         int trace_on = 0;
474 acquirelock:
475         /*
476          * Interrupts will be restored by the 'trap return' code, except when
477          * single stepping.
478          */
479         local_irq_save(flags);
480
481         cpu = ks->cpu;
482         kgdb_info[cpu].debuggerinfo = regs;
483         kgdb_info[cpu].task = current;
484         kgdb_info[cpu].ret_state = 0;
485         kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
486         /*
487          * Make sure the above info reaches the primary CPU before
488          * our cpu_in_kgdb[] flag setting does:
489          */
490         atomic_inc(&cpu_in_kgdb[cpu]);
491
492         /*
493          * CPU will loop if it is a slave or request to become a kgdb
494          * master cpu and acquire the kgdb_active lock:
495          */
496         while (1) {
497 cpu_loop:
498                 if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
499                         kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
500                         goto cpu_master_loop;
501                 } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
502                         if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
503                                 break;
504                 } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
505                         if (!atomic_read(&passive_cpu_wait[cpu]))
506                                 goto return_normal;
507                 } else {
508 return_normal:
509                         /* Return to normal operation by executing any
510                          * hw breakpoint fixup.
511                          */
512                         if (arch_kgdb_ops.correct_hw_break)
513                                 arch_kgdb_ops.correct_hw_break();
514                         if (trace_on)
515                                 tracing_on();
516                         atomic_dec(&cpu_in_kgdb[cpu]);
517                         touch_softlockup_watchdog_sync();
518                         clocksource_touch_watchdog();
519                         local_irq_restore(flags);
520                         return 0;
521                 }
522                 cpu_relax();
523         }
524
525         /*
526          * For single stepping, try to only enter on the processor
527          * that was single stepping.  To guard against a deadlock, the
528          * kernel will only try for the value of sstep_tries before
529          * giving up and continuing on.
530          */
531         if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
532             (kgdb_info[cpu].task &&
533              kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
534                 atomic_set(&kgdb_active, -1);
535                 touch_softlockup_watchdog_sync();
536                 clocksource_touch_watchdog();
537                 local_irq_restore(flags);
538
539                 goto acquirelock;
540         }
541
542         if (!kgdb_io_ready(1)) {
543                 kgdb_info[cpu].ret_state = 1;
544                 goto kgdb_restore; /* No I/O connection, resume the system */
545         }
546
547         /*
548          * Don't enter if we have hit a removed breakpoint.
549          */
550         if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
551                 goto kgdb_restore;
552
553         /* Call the I/O driver's pre_exception routine */
554         if (dbg_io_ops->pre_exception)
555                 dbg_io_ops->pre_exception();
556
557         kgdb_disable_hw_debug(ks->linux_regs);
558
559         /*
560          * Get the passive CPU lock which will hold all the non-primary
561          * CPUs in a spin state while the debugger is active
562          */
563         if (!kgdb_single_step) {
564                 for (i = 0; i < NR_CPUS; i++)
565                         atomic_inc(&passive_cpu_wait[i]);
566         }
567
568 #ifdef CONFIG_SMP
569         /* Signal the other CPUs to enter the debug core as slaves */
570         if ((!kgdb_single_step) && kgdb_do_roundup)
571                 kgdb_roundup_cpus(flags);
572 #endif
573
574         /*
575          * Wait for the other CPUs to be notified and be waiting for us:
576          */
577         for_each_online_cpu(i) {
578                 while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i]))
579                         cpu_relax();
580         }
581
582         /*
583          * At this point the primary processor is completely
584          * in the debugger and all secondary CPUs are quiescent
585          */
586         dbg_deactivate_sw_breakpoints();
587         kgdb_single_step = 0;
588         kgdb_contthread = current;
589         exception_level = 0;
590         trace_on = tracing_is_on();
591         if (trace_on)
592                 tracing_off();
593
594         while (1) {
595 cpu_master_loop:
596                 if (dbg_kdb_mode) {
597                         kgdb_connected = 1;
598                         error = kdb_stub(ks);
599                 } else {
600                         error = gdb_serial_stub(ks);
601                 }
602
603                 if (error == DBG_PASS_EVENT) {
604                         dbg_kdb_mode = !dbg_kdb_mode;
605                         kgdb_connected = 0;
606                 } else if (error == DBG_SWITCH_CPU_EVENT) {
607                         dbg_cpu_switch(cpu, dbg_switch_cpu);
608                         goto cpu_loop;
609                 } else {
610                         kgdb_info[cpu].ret_state = error;
611                         break;
612                 }
613         }
614
615         /* Call the I/O driver's post_exception routine */
616         if (dbg_io_ops->post_exception)
617                 dbg_io_ops->post_exception();
618
619         atomic_dec(&cpu_in_kgdb[ks->cpu]);
620
621         if (!kgdb_single_step) {
622                 for (i = NR_CPUS-1; i >= 0; i--)
623                         atomic_dec(&passive_cpu_wait[i]);
624                 /*
625                  * Wait till all the CPUs have quit from the debugger,
626                  * but allow a CPU that hit an exception and is
627                  * waiting to become the master to remain in the debug
628                  * core.
629                  */
630                 for_each_online_cpu(i) {
631                         while (kgdb_do_roundup &&
632                                atomic_read(&cpu_in_kgdb[i]) &&
633                                !(kgdb_info[i].exception_state &
634                                  DCPU_WANT_MASTER))
635                                 cpu_relax();
636                 }
637         }
638
639 kgdb_restore:
640         if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
641                 int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
642                 if (kgdb_info[sstep_cpu].task)
643                         kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
644                 else
645                         kgdb_sstep_pid = 0;
646         }
647         if (trace_on)
648                 tracing_on();
649         /* Free kgdb_active */
650         atomic_set(&kgdb_active, -1);
651         touch_softlockup_watchdog_sync();
652         clocksource_touch_watchdog();
653         local_irq_restore(flags);
654
655         return kgdb_info[cpu].ret_state;
656 }
657
658 /*
659  * kgdb_handle_exception() - main entry point from a kernel exception
660  *
661  * Locking hierarchy:
662  *      interface locks, if any (begin_session)
663  *      kgdb lock (kgdb_active)
664  */
665 int
666 kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
667 {
668         struct kgdb_state kgdb_var;
669         struct kgdb_state *ks = &kgdb_var;
670         int ret;
671
672         ks->cpu                 = raw_smp_processor_id();
673         ks->ex_vector           = evector;
674         ks->signo               = signo;
675         ks->err_code            = ecode;
676         ks->kgdb_usethreadid    = 0;
677         ks->linux_regs          = regs;
678
679         if (kgdb_reenter_check(ks))
680                 return 0; /* Ouch, double exception ! */
681         kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
682         ret = kgdb_cpu_enter(ks, regs);
683         kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER |
684                                                 DCPU_IS_SLAVE);
685         return ret;
686 }
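
/*
 * Illustrative sketch (not part of this file): an architecture's trap or
 * die notifier typically funnels debug exceptions into the core along
 * these lines.  The vector/signal values and the NOTIFY_* plumbing are
 * assumptions borrowed from typical arch code.
 */
static int example_arch_kgdb_trap(int trapnr, struct pt_regs *regs)
{
        /* A non-zero return means the debug core did not consume the trap. */
        if (kgdb_handle_exception(trapnr, SIGTRAP, 0, regs))
                return NOTIFY_DONE;
        return NOTIFY_STOP;
}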
687
688 int kgdb_nmicallback(int cpu, void *regs)
689 {
690 #ifdef CONFIG_SMP
691         struct kgdb_state kgdb_var;
692         struct kgdb_state *ks = &kgdb_var;
693
694         memset(ks, 0, sizeof(struct kgdb_state));
695         ks->cpu                 = cpu;
696         ks->linux_regs          = regs;
697
698         if (!atomic_read(&cpu_in_kgdb[cpu]) &&
699             atomic_read(&kgdb_active) != -1 &&
700             atomic_read(&kgdb_active) != cpu) {
701                 kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
702                 kgdb_cpu_enter(ks, regs);
703                 kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
704                 return 0;
705         }
706 #endif
707         return 1;
708 }
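
/*
 * Illustrative sketch (not part of this file): kgdb_nmicallback() is meant
 * to be called from an architecture's roundup IPI or NMI handler so that
 * the other CPUs park themselves in the debug core, e.g.:
 */
static void example_kgdb_roundup_ipi(struct pt_regs *regs)
{
        kgdb_nmicallback(raw_smp_processor_id(), regs);
}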
709
710 static void kgdb_console_write(struct console *co, const char *s,
711    unsigned count)
712 {
713         unsigned long flags;
714
715         /* If we're debugging, or KGDB has not connected, don't try
716          * to print. */
717         if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
718                 return;
719
720         local_irq_save(flags);
721         gdbstub_msg_write(s, count);
722         local_irq_restore(flags);
723 }
724
725 static struct console kgdbcons = {
726         .name           = "kgdb",
727         .write          = kgdb_console_write,
728         .flags          = CON_PRINTBUFFER | CON_ENABLED,
729         .index          = -1,
730 };
731
732 #ifdef CONFIG_MAGIC_SYSRQ
733 static void sysrq_handle_dbg(int key, struct tty_struct *tty)
734 {
735         if (!dbg_io_ops) {
736                 printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
737                 return;
738         }
739         if (!kgdb_connected) {
740 #ifdef CONFIG_KGDB_KDB
741                 if (!dbg_kdb_mode)
742                         printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
743 #else
744                 printk(KERN_CRIT "Entering KGDB\n");
745 #endif
746         }
747
748         kgdb_breakpoint();
749 }
750
751 static struct sysrq_key_op sysrq_dbg_op = {
752         .handler        = sysrq_handle_dbg,
753         .help_msg       = "debug(G)",
754         .action_msg     = "DEBUG",
755 };
756 #endif
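
/*
 * With CONFIG_MAGIC_SYSRQ enabled, the handler above lets you drop into
 * the debugger from a running system, for example:
 *
 *     echo g > /proc/sysrq-trigger
 *
 * (assuming sysrq is enabled via /proc/sys/kernel/sysrq).
 */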
757
758 static void kgdb_register_callbacks(void)
759 {
760         if (!kgdb_io_module_registered) {
761                 kgdb_io_module_registered = 1;
762                 kgdb_arch_init();
763 #ifdef CONFIG_MAGIC_SYSRQ
764                 register_sysrq_key('g', &sysrq_dbg_op);
765 #endif
766                 if (kgdb_use_con && !kgdb_con_registered) {
767                         register_console(&kgdbcons);
768                         kgdb_con_registered = 1;
769                 }
770         }
771 }
772
773 static void kgdb_unregister_callbacks(void)
774 {
775         /*
776          * When this routine is called, KGDB should unregister from the
777          * panic handler and clean up, making sure it is not handling any
778          * break exceptions at the time.
779          */
780         if (kgdb_io_module_registered) {
781                 kgdb_io_module_registered = 0;
782                 kgdb_arch_exit();
783 #ifdef CONFIG_MAGIC_SYSRQ
784                 unregister_sysrq_key('g', &sysrq_dbg_op);
785 #endif
786                 if (kgdb_con_registered) {
787                         unregister_console(&kgdbcons);
788                         kgdb_con_registered = 0;
789                 }
790         }
791 }
792
793 /*
794  * There are times a tasklet needs to be used instead of a compiled-in
795  * breakpoint, so that the exception is raised outside a kgdb I/O module.
796  * Such is the case with kgdboe, where calling a breakpoint from within
797  * the I/O driver itself would be fatal.
798  */
799 static void kgdb_tasklet_bpt(unsigned long ing)
800 {
801         kgdb_breakpoint();
802         atomic_set(&kgdb_break_tasklet_var, 0);
803 }
804
805 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
806
807 void kgdb_schedule_breakpoint(void)
808 {
809         if (atomic_read(&kgdb_break_tasklet_var) ||
810                 atomic_read(&kgdb_active) != -1 ||
811                 atomic_read(&kgdb_setting_breakpoint))
812                 return;
813         atomic_inc(&kgdb_break_tasklet_var);
814         tasklet_schedule(&kgdb_tasklet_breakpoint);
815 }
816 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
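
/*
 * Illustrative sketch (not part of this file): a polled I/O driver that
 * cannot safely trap in its own receive path, such as kgdboe, would defer
 * entry into the debugger from its rx handler roughly like this.
 */
static void example_rx_char(char ch)
{
        if (ch == 0x03)                 /* ctrl-c: remote break request */
                kgdb_schedule_breakpoint();     /* trap later, from the tasklet */
}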
817
818 static void kgdb_initial_breakpoint(void)
819 {
820         kgdb_break_asap = 0;
821
822         printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
823         kgdb_breakpoint();
824 }
825
826 /**
827  *      kgdb_register_io_module - register KGDB IO module
828  *      @new_dbg_io_ops: the io ops vector
829  *
830  *      Register it with the KGDB core.
831  */
832 int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
833 {
834         int err;
835
836         spin_lock(&kgdb_registration_lock);
837
838         if (dbg_io_ops) {
839                 spin_unlock(&kgdb_registration_lock);
840
841                 printk(KERN_ERR "kgdb: Another I/O driver is already "
842                                 "registered with KGDB.\n");
843                 return -EBUSY;
844         }
845
846         if (new_dbg_io_ops->init) {
847                 err = new_dbg_io_ops->init();
848                 if (err) {
849                         spin_unlock(&kgdb_registration_lock);
850                         return err;
851                 }
852         }
853
854         dbg_io_ops = new_dbg_io_ops;
855
856         spin_unlock(&kgdb_registration_lock);
857
858         printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
859                new_dbg_io_ops->name);
860
861         /* Arm KGDB now. */
862         kgdb_register_callbacks();
863
864         if (kgdb_break_asap)
865                 kgdb_initial_breakpoint();
866
867         return 0;
868 }
869 EXPORT_SYMBOL_GPL(kgdb_register_io_module);
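
/*
 * Illustrative sketch (not part of this file) of a minimal polled I/O
 * driver registering with the debug core.  my_uart_getc()/my_uart_putc()
 * are hypothetical helpers, and only the fields shown are assumed here.
 */
static int example_dbg_read_char(void)
{
        return my_uart_getc();          /* or NO_POLL_CHAR when no data */
}

static void example_dbg_write_char(u8 chr)
{
        my_uart_putc(chr);
}

static struct kgdb_io example_dbg_io_ops = {
        .name           = "example_dbg",
        .read_char      = example_dbg_read_char,
        .write_char     = example_dbg_write_char,
};

static int __init example_dbg_init(void)
{
        return kgdb_register_io_module(&example_dbg_io_ops);
}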
870
871 /**
872  *      kgdb_unregister_io_module - unregister KGDB IO module
873  *      @old_dbg_io_ops: the io ops vector
874  *
875  *      Unregister it with the KGDB core.
876  */
877 void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
878 {
879         BUG_ON(kgdb_connected);
880
881         /*
882          * KGDB is no longer able to communicate out, so
883          * unregister our callbacks and reset state.
884          */
885         kgdb_unregister_callbacks();
886
887         spin_lock(&kgdb_registration_lock);
888
889         WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
890         dbg_io_ops = NULL;
891
892         spin_unlock(&kgdb_registration_lock);
893
894         printk(KERN_INFO
895                 "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
896                 old_dbg_io_ops->name);
897 }
898 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
899
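/*
 * Fetch one character from the registered I/O driver.  NO_POLL_CHAR is
 * folded to -1 for the callers, and in kdb mode DEL (127) is mapped to
 * backspace (8) so line editing works with common terminal settings.
 */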
900 int dbg_io_get_char(void)
901 {
902         int ret = dbg_io_ops->read_char();
903         if (ret == NO_POLL_CHAR)
904                 return -1;
905         if (!dbg_kdb_mode)
906                 return ret;
907         if (ret == 127)
908                 return 8;
909         return ret;
910 }
911
912 /**
913  * kgdb_breakpoint - generate breakpoint exception
914  *
915  * This function will generate a breakpoint exception.  It is used at the
916  * beginning of a program to sync up with a debugger and can be used
917  * otherwise as a quick means to stop program execution and "break" into
918  * the debugger.
919  */
920 void kgdb_breakpoint(void)
921 {
922         atomic_inc(&kgdb_setting_breakpoint);
923         wmb(); /* Sync point before breakpoint */
924         arch_kgdb_breakpoint();
925         wmb(); /* Sync point after breakpoint */
926         atomic_dec(&kgdb_setting_breakpoint);
927 }
928 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
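
/*
 * Illustrative sketch (not part of this file): planting a programmatic
 * breakpoint at a suspect spot in driver code; example_probe() is a
 * hypothetical function.
 */
static int example_probe(void)
{
        /* ... set up the state you want to inspect ... */
        kgdb_breakpoint();      /* drops into kgdb/kdb right here */
        return 0;
}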
929
930 static int __init opt_kgdb_wait(char *str)
931 {
932         kgdb_break_asap = 1;
933
934         kdb_init(KDB_INIT_EARLY);
935         if (kgdb_io_module_registered)
936                 kgdb_initial_breakpoint();
937
938         return 0;
939 }
940
941 early_param("kgdbwait", opt_kgdb_wait);