x86/oprofile: fix initialization of arch_perfmon for core_i7
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 944bbcd..28ee490 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -1,10 +1,11 @@
 /**
  * @file nmi_int.c
  *
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2008 OProfile authors
  * @remark Read the file COPYING
  *
  * @author John Levon <levon@movementarian.org>
+ * @author Robert Richter <robert.richter@amd.com>
  */
 
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/moduleparam.h>
 #include <linux/kdebug.h>
+#include <linux/cpu.h>
 #include <asm/nmi.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
+
 #include "op_counter.h"
 #include "op_x86_model.h"
 
-static struct op_x86_model_spec const * model;
-static struct op_msrs cpu_msrs[NR_CPUS];
-static unsigned long saved_lvtpc[NR_CPUS];
-
-static int nmi_start(void);
-static void nmi_stop(void);
+static struct op_x86_model_spec const *model;
+static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
+static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 
 /* 0 == registered but off, 1 == registered and on */
 static int nmi_enabled = 0;
 
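
The NR_CPUS-sized arrays become per-CPU variables, which avoids wasting space on CPUs that can never be present and gives each CPU's data proper placement. A minimal sketch of the accessor pattern used throughout the rest of the patch (kernel context; demo_stat is a hypothetical variable, not part of the patch):

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, demo_stat);	/* hypothetical */

static void demo_accessors(int cpu)
{
	/* another CPU's instance, selected by index */
	per_cpu(demo_stat, cpu)++;

	/*
	 * this CPU's instance; the caller must be non-preemptible,
	 * e.g. inside an on_each_cpu() callback or an NMI handler
	 */
	__get_cpu_var(demo_stat)++;
}
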
-#ifdef CONFIG_PM
+/* common functions */
 
-static int nmi_suspend(struct sys_device *dev, pm_message_t state)
-{
-       if (nmi_enabled == 1)
-               nmi_stop();
-       return 0;
-}
-
-
-static int nmi_resume(struct sys_device *dev)
+u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
+                   struct op_counter_config *counter_config)
 {
-       if (nmi_enabled == 1)
-               nmi_start();
-       return 0;
+       u64 val = 0;
+       u16 event = (u16)counter_config->event;
+
+       val |= ARCH_PERFMON_EVENTSEL_INT;
+       val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
+       val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
+       val |= (counter_config->unit_mask & 0xFF) << 8;
+       event &= model->event_mask ? model->event_mask : 0xFF;
+       val |= event & 0xFF;
+       val |= (event & 0x0F00) << 24;
+
+       return val;
 }
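
The new op_x86_get_ctrl() helper packs one counter's configuration into the event-select MSR layout shared by the P6/architectural-perfmon and AMD models: event select in bits 0-7 (with bits 8-11 of an extended event going to bits 32-35), unit mask in bits 8-15, USR and OS enables in bits 16-17, and the APIC interrupt enable in bit 20. A standalone sketch of the same packing, with stand-in constants for the kernel's ARCH_PERFMON_EVENTSEL_* definitions and the model->event_mask step omitted:

#include <stdint.h>
#include <stdio.h>

#define EVNTSEL_INT (1ULL << 20)	/* APIC interrupt enable */
#define EVNTSEL_USR (1ULL << 16)	/* count in user mode */
#define EVNTSEL_OS  (1ULL << 17)	/* count in kernel mode */

static uint64_t pack_ctrl(uint16_t event, uint8_t unit_mask,
			  int user, int kernel)
{
	uint64_t val = EVNTSEL_INT;		  /* NMI on overflow */

	if (user)
		val |= EVNTSEL_USR;
	if (kernel)
		val |= EVNTSEL_OS;
	val |= (uint64_t)unit_mask << 8;	  /* bits 8-15 */
	val |= event & 0xFF;			  /* bits 0-7 */
	val |= ((uint64_t)event & 0x0F00) << 24;  /* bits 32-35 */
	return val;
}

int main(void)
{
	/* hypothetical event 0x76, umask 0, user and kernel counting */
	printf("%#llx\n", (unsigned long long)pack_ctrl(0x76, 0, 1, 1));
	return 0;
}
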
 
 
-static struct sysdev_class oprofile_sysclass = {
-       set_kset_name("oprofile"),
-       .resume         = nmi_resume,
-       .suspend        = nmi_suspend,
-};
-
-
-static struct sys_device device_oprofile = {
-       .id     = 0,
-       .cls    = &oprofile_sysclass,
-};
-
-
-static int __init init_sysfs(void)
-{
-       int error;
-       if (!(error = sysdev_class_register(&oprofile_sysclass)))
-               error = sysdev_register(&device_oprofile);
-       return error;
-}
-
-
-static void exit_sysfs(void)
-{
-       sysdev_unregister(&device_oprofile);
-       sysdev_class_unregister(&oprofile_sysclass);
-}
-
-#else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
-#endif /* CONFIG_PM */
-
 static int profile_exceptions_notify(struct notifier_block *self,
                                     unsigned long val, void *data)
 {
@@ -90,10 +58,11 @@ static int profile_exceptions_notify(struct notifier_block *self,
        int ret = NOTIFY_DONE;
        int cpu = smp_processor_id();
 
-       switch(val) {
+       switch (val) {
        case DIE_NMI:
-               if (model->check_ctrs(args->regs, &cpu_msrs[cpu]))
-                       ret = NOTIFY_STOP;
+       case DIE_NMI_IPI:
+               model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
+               ret = NOTIFY_STOP;
                break;
        default:
                break;
@@ -101,52 +70,41 @@ static int profile_exceptions_notify(struct notifier_block *self,
        return ret;
 }
 
-static void nmi_cpu_save_registers(struct op_msrs * msrs)
+static void nmi_cpu_save_registers(struct op_msrs *msrs)
 {
-       unsigned int const nr_ctrs = model->num_counters;
-       unsigned int const nr_ctrls = model->num_controls; 
-       struct op_msr * counters = msrs->counters;
-       struct op_msr * controls = msrs->controls;
+       struct op_msr *counters = msrs->counters;
+       struct op_msr *controls = msrs->controls;
        unsigned int i;
 
-       for (i = 0; i < nr_ctrs; ++i) {
-               if (counters[i].addr){
-                       rdmsr(counters[i].addr,
-                               counters[i].saved.low,
-                               counters[i].saved.high);
-               }
+       for (i = 0; i < model->num_counters; ++i) {
+               if (counters[i].addr)
+                       rdmsrl(counters[i].addr, counters[i].saved);
        }
-       for (i = 0; i < nr_ctrls; ++i) {
-               if (controls[i].addr){
-                       rdmsr(controls[i].addr,
-                               controls[i].saved.low,
-                               controls[i].saved.high);
-               }
+
+       for (i = 0; i < model->num_controls; ++i) {
+               if (controls[i].addr)
+                       rdmsrl(controls[i].addr, controls[i].saved);
        }
 }
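
rdmsrl() replaces the split low/high rdmsr() calls, so each saved register is a single u64 rather than a low/high pair (implying op_msr's saved field is now a plain u64). The two views relate as a simple 32/64-bit split, checked standalone:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x12345678abcdef00ULL;	/* arbitrary MSR value */
	uint32_t low  = (uint32_t)val;		/* rdmsr's EAX half */
	uint32_t high = (uint32_t)(val >> 32);	/* rdmsr's EDX half */

	/* rdmsrl() hands back the combined 64-bit value directly */
	assert((((uint64_t)high << 32) | low) == val);
	return 0;
}
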
 
-
-static void nmi_save_registers(void * dummy)
+static void nmi_save_registers(void *dummy)
 {
        int cpu = smp_processor_id();
-       struct op_msrs * msrs = &cpu_msrs[cpu];
+       struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
        nmi_cpu_save_registers(msrs);
 }
 
-
 static void free_msrs(void)
 {
        int i;
        for_each_possible_cpu(i) {
-               kfree(cpu_msrs[i].counters);
-               cpu_msrs[i].counters = NULL;
-               kfree(cpu_msrs[i].controls);
-               cpu_msrs[i].controls = NULL;
+               kfree(per_cpu(cpu_msrs, i).counters);
+               per_cpu(cpu_msrs, i).counters = NULL;
+               kfree(per_cpu(cpu_msrs, i).controls);
+               per_cpu(cpu_msrs, i).controls = NULL;
        }
 }
 
-
 static int allocate_msrs(void)
 {
        int success = 1;
@@ -155,13 +113,15 @@ static int allocate_msrs(void)
 
        int i;
        for_each_possible_cpu(i) {
-               cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
-               if (!cpu_msrs[i].counters) {
+               per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
+                                                               GFP_KERNEL);
+               if (!per_cpu(cpu_msrs, i).counters) {
                        success = 0;
                        break;
                }
-               cpu_msrs[i].controls = kmalloc(controls_size, GFP_KERNEL);
-               if (!cpu_msrs[i].controls) {
+               per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
+                                                               GFP_KERNEL);
+               if (!per_cpu(cpu_msrs, i).controls) {
                        success = 0;
                        break;
                }
@@ -173,33 +133,33 @@ static int allocate_msrs(void)
        return success;
 }
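
allocate_msrs() keeps the all-or-nothing shape: the first failed kmalloc() breaks out with success = 0, and the function's tail (between these hunks, not shown) frees everything again, which is safe because kfree(NULL) is a no-op and untouched slots stay NULL. The same pattern in plain C (sizes and CPU count are illustrative):

#include <stdlib.h>

#define NCPUS 4			/* stand-in for the possible-CPU set */

struct msrs { void *counters; void *controls; };
static struct msrs cpu_msrs[NCPUS];

static void free_all(void)
{
	for (int i = 0; i < NCPUS; i++) {
		free(cpu_msrs[i].counters);	/* free(NULL) is a no-op */
		cpu_msrs[i].counters = NULL;
		free(cpu_msrs[i].controls);
		cpu_msrs[i].controls = NULL;
	}
}

static int allocate_all(size_t counters_size, size_t controls_size)
{
	int success = 1;

	for (int i = 0; i < NCPUS; i++) {
		cpu_msrs[i].counters = malloc(counters_size);
		if (!cpu_msrs[i].counters) {
			success = 0;
			break;
		}
		cpu_msrs[i].controls = malloc(controls_size);
		if (!cpu_msrs[i].controls) {
			success = 0;
			break;
		}
	}
	if (!success)
		free_all();
	return success;
}

int main(void)
{
	return allocate_all(64, 64) ? 0 : 1;
}
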
 
-
-static void nmi_cpu_setup(void * dummy)
+static void nmi_cpu_setup(void *dummy)
 {
        int cpu = smp_processor_id();
-       struct op_msrs * msrs = &cpu_msrs[cpu];
+       struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
        spin_lock(&oprofilefs_lock);
-       model->setup_ctrs(msrs);
+       model->setup_ctrs(model, msrs);
        spin_unlock(&oprofilefs_lock);
-       saved_lvtpc[cpu] = apic_read(APIC_LVTPC);
+       per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
 static struct notifier_block profile_exceptions_nb = {
        .notifier_call = profile_exceptions_notify,
        .next = NULL,
-       .priority = 0
+       .priority = 2
 };
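
Two related changes here: the handler now consumes DIE_NMI_IPI as well and returns NOTIFY_STOP unconditionally, and the notifier priority is raised from 0 to 2 so oprofile sees the NMI ahead of lower-priority handlers (die notifier chains run highest priority first). A toy chain illustrating that ordering and the early stop (userspace analogue, not the kernel API):

#include <stdio.h>

#define NOTIFY_DONE 0
#define NOTIFY_STOP 1

struct notifier {
	int priority;
	int (*call)(void);
	struct notifier *next;
};

static struct notifier *chain;

/* keep the chain sorted, highest priority first */
static void chain_register(struct notifier *nb)
{
	struct notifier **p = &chain;

	while (*p && (*p)->priority > nb->priority)
		p = &(*p)->next;
	nb->next = *p;
	*p = nb;
}

static int call_chain(void)
{
	struct notifier *n;

	for (n = chain; n; n = n->next)
		if (n->call() == NOTIFY_STOP)
			return NOTIFY_STOP;	/* handled, stop here */
	return NOTIFY_DONE;
}

static int prof(void)  { puts("profiler (prio 2)"); return NOTIFY_STOP; }
static int other(void) { puts("other (prio 0)");    return NOTIFY_DONE; }

int main(void)
{
	struct notifier a = { .priority = 0, .call = other };
	struct notifier b = { .priority = 2, .call = prof };

	chain_register(&a);
	chain_register(&b);
	call_chain();	/* prints only "profiler (prio 2)" */
	return 0;
}
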
 
 static int nmi_setup(void)
 {
-       int err=0;
+       int err = 0;
        int cpu;
 
        if (!allocate_msrs())
                return -ENOMEM;
 
-       if ((err = register_die_notifier(&profile_exceptions_nb))){
+       err = register_die_notifier(&profile_exceptions_nb);
+       if (err) {
                free_msrs();
                return err;
        }
@@ -209,56 +169,48 @@ static int nmi_setup(void)
         */
 
        /* Assume saved/restored counters are the same on all CPUs */
-       model->fill_in_addresses(&cpu_msrs[0]);
-       for_each_possible_cpu (cpu) {
+       model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
+       for_each_possible_cpu(cpu) {
                if (cpu != 0) {
-                       memcpy(cpu_msrs[cpu].counters, cpu_msrs[0].counters,
+                       memcpy(per_cpu(cpu_msrs, cpu).counters,
+                               per_cpu(cpu_msrs, 0).counters,
                                sizeof(struct op_msr) * model->num_counters);
 
-                       memcpy(cpu_msrs[cpu].controls, cpu_msrs[0].controls,
+                       memcpy(per_cpu(cpu_msrs, cpu).controls,
+                               per_cpu(cpu_msrs, 0).controls,
                                sizeof(struct op_msr) * model->num_controls);
                }
 
        }
-       on_each_cpu(nmi_save_registers, NULL, 0, 1);
-       on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
+       on_each_cpu(nmi_save_registers, NULL, 1);
+       on_each_cpu(nmi_cpu_setup, NULL, 1);
        nmi_enabled = 1;
        return 0;
 }
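
Two points in nmi_setup(): fill_in_addresses() runs once against CPU 0's tables and the result is memcpy()'d to every other possible CPU, which is valid only under the stated assumption that the register layout is identical across CPUs; and on_each_cpu() is down to three arguments because the SMP cross-call API dropped its old retry parameter, leaving (func, info, wait) with wait = 1 so setup has finished everywhere before nmi_enabled is set. A sketch of the template-copy idea:

#include <string.h>

#define NCPUS 4				/* stand-in for possible CPUs */

struct msr_entry { unsigned int addr; };	/* simplified op_msr */
static struct msr_entry counters[NCPUS][2];

int main(void)
{
	int cpu;

	/* fill CPU 0 once (addresses are illustrative) ... */
	counters[0][0].addr = 0xc0010004;
	counters[0][1].addr = 0xc0010005;

	/* ... then replicate, assuming all CPUs look the same */
	for (cpu = 1; cpu < NCPUS; cpu++)
		memcpy(counters[cpu], counters[0], sizeof(counters[0]));
	return 0;
}
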
 
-
-static void nmi_restore_registers(struct op_msrs * msrs)
+static void nmi_restore_registers(struct op_msrs *msrs)
 {
-       unsigned int const nr_ctrs = model->num_counters;
-       unsigned int const nr_ctrls = model->num_controls; 
-       struct op_msr * counters = msrs->counters;
-       struct op_msr * controls = msrs->controls;
+       struct op_msr *counters = msrs->counters;
+       struct op_msr *controls = msrs->controls;
        unsigned int i;
 
-       for (i = 0; i < nr_ctrls; ++i) {
-               if (controls[i].addr){
-                       wrmsr(controls[i].addr,
-                               controls[i].saved.low,
-                               controls[i].saved.high);
-               }
+       for (i = 0; i < model->num_controls; ++i) {
+               if (controls[i].addr)
+                       wrmsrl(controls[i].addr, controls[i].saved);
        }
-       for (i = 0; i < nr_ctrs; ++i) {
-               if (counters[i].addr){
-                       wrmsr(counters[i].addr,
-                               counters[i].saved.low,
-                               counters[i].saved.high);
-               }
+
+       for (i = 0; i < model->num_counters; ++i) {
+               if (counters[i].addr)
+                       wrmsrl(counters[i].addr, counters[i].saved);
        }
 }
 
-static void nmi_cpu_shutdown(void * dummy)
+static void nmi_cpu_shutdown(void *dummy)
 {
        unsigned int v;
        int cpu = smp_processor_id();
-       struct op_msrs * msrs = &cpu_msrs[cpu];
+       struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
+
        /* restoring APIC_LVTPC can trigger an apic error because the delivery
         * mode and vector nr combination can be illegal. That's by design: on
         * power on apic lvt contain a zero vector nr which are legal only for
@@ -266,60 +218,58 @@ static void nmi_cpu_shutdown(void * dummy)
         */
        v = apic_read(APIC_LVTERR);
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
-       apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
+       apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
        nmi_restore_registers(msrs);
 }
 
 static void nmi_shutdown(void)
 {
+       struct op_msrs *msrs;
+
        nmi_enabled = 0;
-       on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        unregister_die_notifier(&profile_exceptions_nb);
-       model->shutdown(cpu_msrs);
+       msrs = &get_cpu_var(cpu_msrs);
+       model->shutdown(msrs);
        free_msrs();
+       put_cpu_var(cpu_msrs);
 }
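
nmi_shutdown() switches to get_cpu_var()/put_cpu_var(), which bracket the model->shutdown() call with disabled preemption so the task cannot migrate away from the CPU whose MSR copy it is holding. The pairing in isolation (kernel context; demo is a hypothetical per-CPU variable):

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo);	/* hypothetical */

static void demo_touch_this_cpu(void)
{
	int *v = &get_cpu_var(demo);	/* disables preemption */

	*v += 1;			/* safe: no migration possible here */
	put_cpu_var(demo);		/* re-enables preemption */
}
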
 
-static void nmi_cpu_start(void * dummy)
+static void nmi_cpu_start(void *dummy)
 {
-       struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
+       struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        model->start(msrs);
 }
 
 static int nmi_start(void)
 {
-       on_each_cpu(nmi_cpu_start, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_start, NULL, 1);
        return 0;
 }
-static void nmi_cpu_stop(void * dummy)
+
+static void nmi_cpu_stop(void *dummy)
 {
-       struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
+       struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        model->stop(msrs);
 }
+
 static void nmi_stop(void)
 {
-       on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_stop, NULL, 1);
 }
 
-
 struct op_counter_config counter_config[OP_MAX_COUNTER];
 
-static int nmi_create_files(struct super_block * sb, struct dentry * root)
+static int nmi_create_files(struct super_block *sb, struct dentry *root)
 {
        unsigned int i;
 
        for (i = 0; i < model->num_counters; ++i) {
-               struct dentry * dir;
+               struct dentry *dir;
                char buf[4];
-               /* quick little hack to _not_ expose a counter if it is not
+
+               /* quick little hack to _not_ expose a counter if it is not
                 * available for use.  This should protect userspace app.
                 * NOTE:  assumes 1:1 mapping here (that counters are organized
                 *        sequentially in their struct assignment).
@@ -329,25 +279,93 @@ static int nmi_create_files(struct super_block * sb, struct dentry * root)
 
                snprintf(buf,  sizeof(buf), "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);
-               oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled); 
-               oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event); 
-               oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count); 
-               oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); 
-               oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); 
-               oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); 
+               oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
+               oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
+               oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
+               oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
+               oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
+               oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
        }
 
        return 0;
 }
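
Each counter ends up as a numbered oprofilefs directory (0/, 1/, ...) containing enabled, event, count, unit_mask, kernel and user files. The char buf[4] sizing holds up to three decimal digits plus the NUL terminator, and snprintf() truncates rather than overflows beyond that; a standalone check:

#include <stdio.h>

int main(void)
{
	char buf[4];
	int i;

	for (i = 0; i < 1000; i++) {
		/*
		 * snprintf() writes at most sizeof(buf) bytes including
		 * the NUL, so indices 0..999 fit exactly
		 */
		snprintf(buf, sizeof(buf), "%d", i);
	}
	printf("last dir name: %s\n", buf);	/* "999" */
	return 0;
}
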
-static int p4force;
-module_param(p4force, int, 0);
-static int __init p4_init(char ** cpu_type)
+
+#ifdef CONFIG_SMP
+static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
+                                void *data)
+{
+       int cpu = (unsigned long)data;
+       switch (action) {
+       case CPU_DOWN_FAILED:
+       case CPU_ONLINE:
+               smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
+               break;
+       case CPU_DOWN_PREPARE:
+               smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block oprofile_cpu_nb = {
+       .notifier_call = oprofile_cpu_notifier
+};
+#endif
+
+#ifdef CONFIG_PM
+
+static int nmi_suspend(struct sys_device *dev, pm_message_t state)
+{
+       /* Only one CPU left, just stop that one */
+       if (nmi_enabled == 1)
+               nmi_cpu_stop(NULL);
+       return 0;
+}
+
+static int nmi_resume(struct sys_device *dev)
+{
+       if (nmi_enabled == 1)
+               nmi_cpu_start(NULL);
+       return 0;
+}
+
+static struct sysdev_class oprofile_sysclass = {
+       .name           = "oprofile",
+       .resume         = nmi_resume,
+       .suspend        = nmi_suspend,
+};
+
+static struct sys_device device_oprofile = {
+       .id     = 0,
+       .cls    = &oprofile_sysclass,
+};
+
+static int __init init_sysfs(void)
+{
+       int error;
+
+       error = sysdev_class_register(&oprofile_sysclass);
+       if (!error)
+               error = sysdev_register(&device_oprofile);
+       return error;
+}
+
+static void exit_sysfs(void)
+{
+       sysdev_unregister(&device_oprofile);
+       sysdev_class_unregister(&oprofile_sysclass);
+}
+
+#else
+#define init_sysfs() do { } while (0)
+#define exit_sysfs() do { } while (0)
+#endif /* CONFIG_PM */
+
+static int __init p4_init(char **cpu_type)
 {
        __u8 cpu_model = boot_cpu_data.x86_model;
 
-       if (!p4force && (cpu_model > 6 || cpu_model == 5))
+       if (cpu_model > 6 || cpu_model == 5)
                return 0;
 
 #ifndef CONFIG_SMP
@@ -356,15 +374,15 @@ static int __init p4_init(char ** cpu_type)
        return 1;
 #else
        switch (smp_num_siblings) {
-               case 1:
-                       *cpu_type = "i386/p4";
-                       model = &op_p4_spec;
-                       return 1;
-
-               case 2:
-                       *cpu_type = "i386/p4-ht";
-                       model = &op_p4_ht2_spec;
-                       return 1;
+       case 1:
+               *cpu_type = "i386/p4";
+               model = &op_p4_spec;
+               return 1;
+
+       case 2:
+               *cpu_type = "i386/p4-ht";
+               model = &op_p4_ht2_spec;
+               return 1;
        }
 #endif
 
@@ -373,28 +391,60 @@ static int __init p4_init(char ** cpu_type)
        return 0;
 }
 
+static int force_arch_perfmon;
+static int force_cpu_type(const char *str, struct kernel_param *kp)
+{
+       if (!strcmp(str, "archperfmon")) {
+               force_arch_perfmon = 1;
+               printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
+       }
 
-static int __init ppro_init(char ** cpu_type)
+       return 0;
+}
+module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
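
module_param_call() attaches the custom parser to a cpu_type parameter, so the architectural-perfmon fallback can be forced even on models that ppro_init() recognizes (note the early return 0 it enables there). Usage, assuming oprofile is built as a module, or the prefixed form on the kernel command line when built in:

/*
 *   modprobe oprofile cpu_type=archperfmon
 * or:
 *   oprofile.cpu_type=archperfmon
 */
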
+
+static int __init ppro_init(char **cpu_type)
 {
        __u8 cpu_model = boot_cpu_data.x86_model;
+       struct op_x86_model_spec const *spec = &op_ppro_spec;   /* default */
 
-       if (cpu_model == 14)
+       if (force_arch_perfmon && cpu_has_arch_perfmon)
+               return 0;
+
+       switch (cpu_model) {
+       case 0 ... 2:
+               *cpu_type = "i386/ppro";
+               break;
+       case 3 ... 5:
+               *cpu_type = "i386/pii";
+               break;
+       case 6 ... 8:
+       case 10 ... 11:
+               *cpu_type = "i386/piii";
+               break;
+       case 9:
+       case 13:
+               *cpu_type = "i386/p6_mobile";
+               break;
+       case 14:
                *cpu_type = "i386/core";
-       else if (cpu_model == 15 || cpu_model == 23)
+               break;
+       case 15: case 23:
                *cpu_type = "i386/core_2";
-       else if (cpu_model > 0xd)
+               break;
+       case 26:
+               spec = &op_arch_perfmon_spec;
+               *cpu_type = "i386/core_i7";
+               break;
+       case 28:
+               *cpu_type = "i386/atom";
+               break;
+       default:
+               /* Unknown */
                return 0;
-       else if (cpu_model == 9) {
-               *cpu_type = "i386/p6_mobile";
-       } else if (cpu_model > 5) {
-               *cpu_type = "i386/piii";
-       } else if (cpu_model > 2) {
-               *cpu_type = "i386/pii";
-       } else {
-               *cpu_type = "i386/ppro";
        }
 
-       model = &op_ppro_spec;
+       model = spec;
        return 1;
 }
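
ppro_init() replaces the old if/else ladder with a switch using GCC's case-range extension; the notable new entries are model 26 (Core i7), which selects op_arch_perfmon_spec rather than the op_ppro_spec default, matching the subject line, and model 28 (Atom). The mapping, runnable standalone (requires GCC for the range syntax):

#include <stdio.h>

static const char *p6_cpu_type(unsigned int model)
{
	switch (model) {
	case 0 ... 2:		/* GCC case-range extension */
		return "i386/ppro";
	case 3 ... 5:
		return "i386/pii";
	case 6 ... 8:
	case 10 ... 11:
		return "i386/piii";
	case 9:
	case 13:
		return "i386/p6_mobile";
	case 14:
		return "i386/core";
	case 15: case 23:
		return "i386/core_2";
	case 26:		/* Core i7: uses op_arch_perfmon_spec */
		return "i386/core_i7";
	case 28:
		return "i386/atom";
	default:
		return NULL;	/* unknown model */
	}
}

int main(void)
{
	printf("%s\n", p6_cpu_type(26));	/* i386/core_i7 */
	return 0;
}
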
 
@@ -405,73 +455,100 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 {
        __u8 vendor = boot_cpu_data.x86_vendor;
        __u8 family = boot_cpu_data.x86;
-       char *cpu_type;
+       char *cpu_type = NULL;
+       int ret = 0;
 
        if (!cpu_has_apic)
                return -ENODEV;
+
        switch (vendor) {
-               case X86_VENDOR_AMD:
-                       /* Needs to be at least an Athlon (or hammer in 32bit mode) */
-
-                       switch (family) {
-                       default:
-                               return -ENODEV;
-                       case 6:
-                               model = &op_athlon_spec;
-                               cpu_type = "i386/athlon";
-                               break;
-                       case 0xf:
-                               model = &op_athlon_spec;
-                               /* Actually it could be i386/hammer too, but give
-                                  user space an consistent name. */
-                               cpu_type = "x86-64/hammer";
-                               break;
-                       case 0x10:
-                               model = &op_athlon_spec;
-                               cpu_type = "x86-64/family10";
-                               break;
-                       }
+       case X86_VENDOR_AMD:
+               /* Needs to be at least an Athlon (or hammer in 32bit mode) */
+
+               switch (family) {
+               case 6:
+                       cpu_type = "i386/athlon";
+                       break;
+               case 0xf:
+                       /*
+                        * Actually it could be i386/hammer too, but
+                        * give user space a consistent name.
+                        */
+                       cpu_type = "x86-64/hammer";
                        break;
-               case X86_VENDOR_INTEL:
-                       switch (family) {
-                               /* Pentium IV */
-                               case 0xf:
-                                       if (!p4_init(&cpu_type))
-                                               return -ENODEV;
-                                       break;
-
-                               /* A P6-class processor */
-                               case 6:
-                                       if (!ppro_init(&cpu_type))
-                                               return -ENODEV;
-                                       break;
-
-                               default:
-                                       return -ENODEV;
-                       }
+               case 0x10:
+                       cpu_type = "x86-64/family10";
+                       break;
+               case 0x11:
+                       cpu_type = "x86-64/family11h";
+                       break;
+               default:
+                       return -ENODEV;
+               }
+               model = &op_amd_spec;
+               break;
+
+       case X86_VENDOR_INTEL:
+               switch (family) {
+                       /* Pentium IV */
+               case 0xf:
+                       p4_init(&cpu_type);
+                       break;
+
+                       /* A P6-class processor */
+               case 6:
+                       ppro_init(&cpu_type);
                        break;
 
                default:
+                       break;
+               }
+
+               if (cpu_type)
+                       break;
+
+               if (!cpu_has_arch_perfmon)
                        return -ENODEV;
+
+               /* use arch perfmon as fallback */
+               cpu_type = "i386/arch_perfmon";
+               model = &op_arch_perfmon_spec;
+               break;
+
+       default:
+               return -ENODEV;
        }
 
-       init_sysfs();
-       using_nmi = 1;
+#ifdef CONFIG_SMP
+       register_cpu_notifier(&oprofile_cpu_nb);
+#endif
+       /* default values, can be overwritten by model */
        ops->create_files = nmi_create_files;
        ops->setup = nmi_setup;
        ops->shutdown = nmi_shutdown;
        ops->start = nmi_start;
        ops->stop = nmi_stop;
        ops->cpu_type = cpu_type;
+
+       if (model->init)
+               ret = model->init(ops);
+       if (ret)
+               return ret;
+
+       init_sysfs();
+       using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
 }
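
op_nmi_init() now installs the generic NMI callbacks first and then lets the model override them through an optional model->init(ops) hook, with using_nmi set only after everything has succeeded. The default-then-override shape, reduced to a toy:

#include <stdio.h>

struct ops { void (*start)(void); };

static void generic_start(void) { puts("generic start"); }
static void model_start(void)   { puts("model-specific start"); }

/* hypothetical model hook: may replace any default, may fail */
static int model_init(struct ops *ops)
{
	ops->start = model_start;
	return 0;
}

int main(void)
{
	struct ops ops = { .start = generic_start };	/* defaults first */

	if (model_init(&ops))		/* model may override or veto */
		return 1;
	ops.start();			/* "model-specific start" */
	return 0;
}
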
 
-
 void op_nmi_exit(void)
 {
-       if (using_nmi)
+       if (using_nmi) {
                exit_sysfs();
+#ifdef CONFIG_SMP
+               unregister_cpu_notifier(&oprofile_cpu_nb);
+#endif
+       }
+       if (model->exit)
+               model->exit();
 }