X-Git-Url: http://ftp.safe.ca/?p=safe%2Fjmp%2Flinux-2.6;a=blobdiff_plain;f=arch%2Fpowerpc%2Fkernel%2Frtas.c;h=74367841615a5ae3a31a2c2c9091916d79a0534c;hp=e2e78d967f31b21fc7dbf70de92c46c4f43d457b;hb=5a0e3ad6af8660be21ca98a971cd00f331318c05;hpb=d9b2b2a277219d4812311d995054ce4f95067725

diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index e2e78d9..7436784 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -23,11 +23,11 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -39,14 +39,17 @@
 #include
 #include
 #include
+#include
+#include
 
 struct rtas_t rtas = {
-	.lock = SPIN_LOCK_UNLOCKED
+	.lock = __ARCH_SPIN_LOCK_UNLOCKED
 };
 EXPORT_SYMBOL(rtas);
 
 struct rtas_suspend_me_data {
 	atomic_t working; /* number of cpus accessing this struct */
+	atomic_t done;
 	int token; /* ibm,suspend-me */
 	int error;
 	struct completion *complete; /* wait on this until working == 0 */
@@ -67,6 +70,28 @@ unsigned long rtas_rmo_buf;
 void (*rtas_flash_term_hook)(int);
 EXPORT_SYMBOL(rtas_flash_term_hook);
 
+/* RTAS use home made raw locking instead of spin_lock_irqsave
+ * because those can be called from within really nasty contexts
+ * such as having the timebase stopped which would lockup with
+ * normal locks and spinlock debugging enabled
+ */
+static unsigned long lock_rtas(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	arch_spin_lock_flags(&rtas.lock, flags);
+	return flags;
+}
+
+static void unlock_rtas(unsigned long flags)
+{
+	arch_spin_unlock(&rtas.lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
 /*
  * call_rtas_display_status and call_rtas_display_status_delay
  * are designed only for very early low-level debugging, which
@@ -79,7 +104,7 @@ static void call_rtas_display_status(char c)
 	if (!rtas.base)
 		return;
 
-	spin_lock_irqsave(&rtas.lock, s);
+	s = lock_rtas();
 
 	args->token = 10;
 	args->nargs = 1;
@@ -89,7 +114,7 @@ static void call_rtas_display_status(char c)
 
 	enter_rtas(__pa(args));
 
-	spin_unlock_irqrestore(&rtas.lock, s);
+	unlock_rtas(s);
 }
 
 static void call_rtas_display_status_delay(char c)
@@ -341,8 +366,8 @@ int rtas_get_error_log_max(void)
 EXPORT_SYMBOL(rtas_get_error_log_max);
 
 
-char rtas_err_buf[RTAS_ERROR_LOG_MAX];
-int rtas_last_error_token;
+static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
+static int rtas_last_error_token;
 
 /** Return a copy of the detailed error text associated with the
  * most recent failed call to rtas. Because the error text
@@ -411,8 +436,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
 		return -1;
 
-	/* Gotta do something different here, use global lock for now... */
-	spin_lock_irqsave(&rtas.lock, s);
+	s = lock_rtas();
 	rtas_args = &rtas.args;
 
 	rtas_args->token = token;
@@ -439,8 +463,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 			outputs[i] = rtas_args->rets[i+1];
 	ret = (nret > 0)? rtas_args->rets[0]: 0;
 
-	/* Gotta do something different here, use global lock for now... */
-	spin_unlock_irqrestore(&rtas.lock, s);
+	unlock_rtas(s);
 
 	if (buff_copy) {
 		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
@@ -485,7 +508,7 @@ unsigned int rtas_busy_delay(int status)
 }
 EXPORT_SYMBOL(rtas_busy_delay);
 
-int rtas_error_rc(int rtas_rc)
+static int rtas_error_rc(int rtas_rc)
 {
 	int rc;
 
@@ -507,7 +530,7 @@ int rtas_error_rc(int rtas_rc)
 			break;
 		default:
 			printk(KERN_ERR "%s: unexpected RTAS error %d\n",
-					__FUNCTION__, rtas_rc);
+					__func__, rtas_rc);
 			rc = -ERANGE;
 			break;
 	}
@@ -567,6 +590,32 @@ int rtas_get_sensor(int sensor, int index, int *state)
 }
 EXPORT_SYMBOL(rtas_get_sensor);
 
+bool rtas_indicator_present(int token, int *maxindex)
+{
+	int proplen, count, i;
+	const struct indicator_elem {
+		u32 token;
+		u32 maxindex;
+	} *indicators;
+
+	indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
+	if (!indicators)
+		return false;
+
+	count = proplen / sizeof(struct indicator_elem);
+
+	for (i = 0; i < count; i++) {
+		if (indicators[i].token != token)
+			continue;
+		if (maxindex)
+			*maxindex = indicators[i].maxindex;
+		return true;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(rtas_indicator_present);
+
 int rtas_set_indicator(int indicator, int index, int new_value)
 {
 	int token = rtas_token("set-indicator");
@@ -664,8 +713,9 @@ static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
 #ifdef CONFIG_PPC_PSERIES
 static void rtas_percpu_suspend_me(void *info)
 {
-	long rc;
+	long rc = H_SUCCESS;
 	unsigned long msr_save;
+	u16 slb_size = mmu_slb_size;
 	int cpu;
 	struct rtas_suspend_me_data *data =
 		(struct rtas_suspend_me_data *)info;
@@ -676,7 +726,8 @@ static void rtas_percpu_suspend_me(void *info)
 	msr_save = mfmsr();
 	mtmsr(msr_save & ~(MSR_EE));
 
-	rc = plpar_hcall_norets(H_JOIN);
+	while (rc == H_SUCCESS && !atomic_read(&data->done))
+		rc = plpar_hcall_norets(H_JOIN);
 
 	mtmsr(msr_save);
 
@@ -687,18 +738,24 @@ static void rtas_percpu_suspend_me(void *info)
 		/* All other cpus are in H_JOIN, this cpu does
 		 * the suspend.
 		 */
+		slb_set_size(SLB_MIN_SIZE);
 		printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n",
 		       smp_processor_id());
 		data->error = rtas_call(data->token, 0, 1, NULL);
 
-		if (data->error)
+		if (data->error) {
 			printk(KERN_DEBUG "ibm,suspend-me returned %d\n",
 			       data->error);
+			slb_set_size(slb_size);
+		}
 	} else {
 		printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
 		       smp_processor_id(), rc);
 		data->error = rc;
 	}
+
+	atomic_set(&data->done, 1);
+
 	/* This cpu did the suspend or got an error; in either case,
 	 * we need to prod all other other cpus out of join state.
 	 * Extra prods are harmless.
@@ -741,6 +798,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
 	}
 
 	atomic_set(&data.working, 0);
+	atomic_set(&data.done, 0);
 	data.token = rtas_token("ibm,suspend-me");
 	data.error = 0;
 	data.complete = &done;
 
 	/* Call function on all CPUs. One of us will make the
 	 * rtas call
 	 */
-	if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0))
+	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
 		data.error = -EINVAL;
 
 	wait_for_completion(&done);
@@ -793,6 +851,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 	if (args.token == RTAS_UNKNOWN_SERVICE)
 		return -EINVAL;
 
+	args.rets = &args.args[nargs];
+	memset(args.rets, 0, args.nret * sizeof(rtas_arg_t));
+
 	/* Need to handle ibm,suspend_me call specially */
 	if (args.token == ibm_suspend_me_token) {
 		rc = rtas_ibm_suspend_me(&args);
@@ -803,20 +864,18 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 
 	buff_copy = get_errorlog_buffer();
 
-	spin_lock_irqsave(&rtas.lock, flags);
+	flags = lock_rtas();
 
 	rtas.args = args;
 	enter_rtas(__pa(&rtas.args));
 	args = rtas.args;
 
-	args.rets = &args.args[nargs];
-
 	/* A -1 return code indicates that the last command couldn't
 	   be completed due to a hardware error. */
 	if (args.rets[0] == -1)
 		errbuf = __fetch_rtas_last_error(buff_copy);
 
-	spin_unlock_irqrestore(&rtas.lock, flags);
+	unlock_rtas(flags);
 
 	if (buff_copy) {
 		if (errbuf)
@@ -919,3 +978,33 @@ int __init early_init_dt_scan_rtas(unsigned long node,
 	/* break now */
 	return 1;
 }
+
+static arch_spinlock_t timebase_lock;
+static u64 timebase = 0;
+
+void __cpuinit rtas_give_timebase(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	hard_irq_disable();
+	arch_spin_lock(&timebase_lock);
+	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
+	timebase = get_tb();
+	arch_spin_unlock(&timebase_lock);
+
+	while (timebase)
+		barrier();
+	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
+	local_irq_restore(flags);
+}
+
+void __cpuinit rtas_take_timebase(void)
+{
+	while (!timebase)
+		barrier();
+	arch_spin_lock(&timebase_lock);
+	set_tb(timebase >> 32, timebase & 0xffffffff);
+	timebase = 0;
+	arch_spin_unlock(&timebase_lock);
+}