/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 * measurement at boot time. (for iSeries, we calibrate the timebase
 * against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 * non-ambiguous timestamps even around leap seconds. This needs
 * a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif
/* keep track of when we need to update the rtc */
time_t last_rtc_update;
#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;
#endif
/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601	(1000000000 / HZ)

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
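
/*
 * Worked example (illustrative): an xsec is 1/2^20 of a second, so
 * SCALE_XSEC(xsec, 1000000) converts a sub-second xsec count to
 * microseconds, i.e. (xsec * 1000000) / 2^20.  The 32-bit variant
 * avoids a divide: mulhwu() returns the high 32 bits of a 32x32-bit
 * product, so mulhwu(xsec << 12, max) = ((xsec << 12) * max) >> 32
 * = (xsec * max) / 2^20.  With xsec = 0x80000 (half a second) and
 * max = 1000000, both forms give 500000 usec.
 */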
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;	/* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;
#define TICKLEN_SCALE	TICK_LENGTH_SHIFT
u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
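
/*
 * Illustrative check (assuming HZ = 250, hence SHIFT_HZ = 8): a tick
 * is 1e9/250 = 4e6 ns, roughly 2^22 = 2^(30 - SHIFT_HZ) ns, so
 * last_tick_len is about 2^(22 + TICKLEN_SCALE).  Shifting it left by
 * TICKLEN_SHIFT = 63 - 30 - TICKLEN_SCALE + 8 lands at about
 * 2^(22 + TICKLEN_SCALE + 41 - TICKLEN_SCALE) = 2^63, as the comment
 * above says.
 */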
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

u64 tb_to_ns_scale;
unsigned tb_to_ns_shift;

struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
	__cputime_msec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
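
/*
 * Illustrative use of the factors above (a sketch of what the
 * cputime_t conversion helpers do with them, not a quote of
 * cputime.h): with tb_ticks_per_sec = 512000000,
 * __cputime_msec_factor is the 0.64 fixed-point fraction
 * 1000/512000000, so
 *	msecs = mulhdu(ticks, __cputime_msec_factor);
 * converts a tick count with one high multiply, e.g. 512000000
 * ticks * (1000/512000000) = 1000 msec.
 */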
/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return mftb();
}
/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, delta;
	unsigned long flags;

	local_irq_save(flags);
	now = read_purr();
	delta = now - get_paca()->startpurr;
	get_paca()->startpurr = now;
	if (!in_interrupt()) {
		delta += get_paca()->system_time;
		get_paca()->system_time = 0;
	}
	account_system_time(tsk, 0, delta);
	local_irq_restore(flags);
}
/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
	cputime_t utime;

	utime = get_paca()->user_time;
	get_paca()->user_time = 0;
	account_user_time(tsk, utime);
}
static void account_process_time(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	account_process_vtime(current);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_mode(regs));
	scheduler_tick();
	run_posix_cpu_timers(current);
}
/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
	int	initialized;		/* thread is running */
	u64	tb;			/* last TB value read */
	u64	purr;			/* last PURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
static void snapshot_tb_and_purr(void *data)
{
	unsigned long flags;
	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

	local_irq_save(flags);
	p->tb = mftb();
	p->purr = mfspr(SPRN_PURR);
	wmb();
	p->initialized = 1;
	local_irq_restore(flags);
}
/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}
/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
	u64 tb, purr;
	s64 stolen;
	struct cpu_purr_data *pme;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	pme = &per_cpu(cpu_purr_data, smp_processor_id());
	if (!pme->initialized)
		return;	/* this can happen in early boot */
	tb = mftb();
	purr = mfspr(SPRN_PURR);
	stolen = (tb - pme->tb) - (purr - pme->purr);
	if (stolen > 0)
		account_steal_time(current, stolen);
	pme->tb = tb;
	pme->purr = purr;
}
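
/*
 * Worked example for calculate_steal_time() above (illustrative
 * numbers, not measurements): if the timebase advanced by 1000 ticks
 * since the last snapshot while the PURR advanced by only 600, this
 * thread actually ran for 600 ticks and the other 400 were "stolen"
 * (used by the hypervisor or the other SMT thread), so
 * stolen = 1000 - 600 = 400 ticks.
 */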
#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
	struct cpu_purr_data *pme;
	unsigned long flags;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	local_irq_save(flags);
	pme = &per_cpu(cpu_purr_data, smp_processor_id());
	pme->tb = mftb();
	pme->purr = mfspr(SPRN_PURR);
	pme->initialized = 1;
	local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs)	update_process_times(user_mode(regs))
#define calculate_steal_time()		do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()			do { } while (0)
#endif
/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
	__get_cpu_var(last_jiffy) = get_tb();
	snapshot_purr();
}
void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);
void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
static __inline__ void timer_check_rtc(void)
{
	/*
	 * update the rtc when needed, this should be performed on the
	 * right fraction of a second. Half or full second ?
	 * Full second works on mk48t59 clocks, others need testing.
	 * Note that this update is basically only used through
	 * the adjtimex system calls. Setting the HW clock in
	 * any other way is a /dev/rtc and userland business.
	 * This is still wrong by -0.5/+1.5 jiffies because of the
	 * timer interrupt resolution and possible delay, but here we
	 * hit a quantization limit which can only be solved by higher
	 * resolution timers and decoupling time management from timer
	 * interrupts. This is also wrong on the clocks
	 * which require being written at the half second boundary.
	 * We should have an rtc call that only sets the minutes and
	 * seconds like on Intel to avoid problems with non UTC clocks.
	 */
	if (ppc_md.set_rtc_time && ntp_synced() &&
	    xtime.tv_sec - last_rtc_update >= 659 &&
	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
		struct rtc_time tm;

		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
		tm.tm_year -= 1900;
		tm.tm_mon -= 1;
		if (ppc_md.set_rtc_time(&tm) == 0)
			last_rtc_update = xtime.tv_sec + 1;
		else
			/* Try again one minute later */
			last_rtc_update += 60;
	}
}
/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv)
{
	unsigned long sec, usec;
	u64 tb_ticks, xsec;
	struct gettimeofday_vars *temp_varp;
	u64 temp_tb_to_xs, temp_stamp_xsec;

	/*
	 * These calculations are faster (gets rid of divides)
	 * if done in units of 1/2^20 rather than microseconds.
	 * The conversion to microseconds at the end is done
	 * without a divide (and in fact, without a multiply)
	 */
	temp_varp = do_gtod.varp;

	/* Sampling the time base must be done after loading
	 * do_gtod.varp in order to avoid racing with update_gtod.
	 */
	data_barrier(temp_varp);
	tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
	temp_tb_to_xs = temp_varp->tb_to_xs;
	temp_stamp_xsec = temp_varp->stamp_xsec;
	xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
	sec = xsec / XSEC_PER_SEC;
	usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
	usec = SCALE_XSEC(usec, 1000000);

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
void do_gettimeofday(struct timeval *tv)
{
	if (__USE_RTC()) {
		/* do this the old way */
		unsigned long flags, seq;
		unsigned int sec, nsec, usec;

		do {
			seq = read_seqbegin_irqsave(&xtime_lock, flags);
			sec = xtime.tv_sec;
			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
		usec = nsec / 1000;
		while (usec >= 1000000) {
			usec -= 1000000;
			++sec;
		}
		tv->tv_sec = sec;
		tv->tv_usec = usec;
		return;
	}
	__do_gettimeofday(tv);
}

EXPORT_SYMBOL(do_gettimeofday);
/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	unsigned temp_idx;
	struct gettimeofday_vars *temp_varp;

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criterion is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = new_tb_stamp;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}
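
/*
 * A minimal sketch of the read side described above (illustrative,
 * not the actual vdso source):
 *
 *	do {
 *		count = vdso_data->tb_update_count;
 *		smp_rmb();
 *		... read tb_orig_stamp, stamp_xsec, tb_to_xs ...
 *		smp_rmb();
 *	} while ((count & 1) || vdso_data->tb_update_count != count);
 *
 * An odd count means an update is in progress; a changed count means
 * an update completed between the two reads.  Either way the reader
 * retries, so it never computes a time from a mixed old/new snapshot.
 */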
/*
 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
 * between tb_orig_stamp and stamp_xsec.  The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32 bits number.  This is a requirement of our fast 32 bits userland
 * implementation in the vdso.  If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
 * with a too big difference, then the vdso will fallback to calling
 * the syscall.
 */
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
	unsigned long offset;
	u64 new_stamp_xsec;
	u64 tlen, t2x;
	u64 tb, xsec_old, xsec_new;
	struct gettimeofday_vars *varp;

	if (__USE_RTC())
		return;
	tlen = current_tick_length();
	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
	if (tlen == last_tick_len && offset < 0x80000000u)
		return;
	if (tlen != last_tick_len) {
		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
		last_tick_len = tlen;
	} else
		t2x = do_gtod.varp->tb_to_xs;
	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;

	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Make sure time doesn't go backwards for userspace gettimeofday.
	 */
	tb = get_tb();
	varp = do_gtod.varp;
	xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
		+ varp->stamp_xsec;
	xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
	if (xsec_new < xsec_old)
		new_stamp_xsec += xsec_old - xsec_new;

	update_gtod(cur_tb, new_stamp_xsec, t2x);
}
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */
static int __init iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;

	/* Make sure we only run on iSeries */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		return -ENODEV;

	tb = get_tb();
	titan = HvCallXm_loadTod();
	if (iSeries_recal_titan) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';

		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if (tick_diff < 0) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if (tick_diff) {
			if (tick_diff < tb_ticks_per_jiffy/25) {
				printk("Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
				       new_tb_ticks_per_jiffy, sign, tick_diff);
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec   = new_tb_ticks_per_sec;
				calc_cputime_factors();
				div128_by_32(XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres);
				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
				tb_to_xs = divres.result_low;
				do_gtod.varp->tb_to_xs = tb_to_xs;
				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
				vdso_data->tb_to_xs = tb_to_xs;
			} else {
				printk("Titan recalibrate: FAILED (difference > 4 percent)\n"
				       "                   new tb_ticks_per_jiffy = %lu\n"
				       "                   old tb_ticks_per_jiffy = %lu\n",
				       new_tb_ticks_per_jiffy, tb_ticks_per_jiffy);
			}
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;

	return 0;
}
late_initcall(iSeries_tb_recal);
/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */
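
/*
 * Worked example for iSeries_tb_recal() above (illustrative numbers):
 * the >> 12 implies the Titan TOD advances in units of 2^-12
 * microseconds, so a Titan delta of 4096000000 units is 1000000 usec.
 * If the timebase advanced by 188000000 ticks over that same second,
 * new_tb_ticks_per_sec = 188000000 * 1000000 / 1000000 = 188000000,
 * and with HZ = 100 the rounded new_tb_ticks_per_jiffy is 1880000.
 */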
/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	int next_dec;
	int cpu = smp_processor_id();
	unsigned long ticks;
	u64 tb_next_jiffy;

#ifdef CONFIG_PPC32
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	profile_tick(CPU_PROFILING);
	calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		get_lppaca()->int_dword.fields.decr_int = 0;
#endif

	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
	       >= tb_ticks_per_jiffy) {
		/* Update last_jiffy */
		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
		/* Handle RTCL overflow on 601 */
		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
			per_cpu(last_jiffy, cpu) -= 1000000000;

		/*
		 * We cannot disable the decrementer, so in the period
		 * between this cpu's being marked offline in cpu_online_map
		 * and calling stop-self, it is taking timer interrupts.
		 * Avoid calling into the scheduler rebalancing code if this
		 * is the case.
		 */
		if (!cpu_is_offline(cpu))
			account_process_time(regs);

		/*
		 * No need to check whether cpu is offline here; boot_cpuid
		 * should have been fixed up by now.
		 */
		if (cpu != boot_cpuid)
			continue;

		write_seqlock(&xtime_lock);
		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
			tb_last_jiffy = tb_next_jiffy;
			do_timer(1);
			timer_recalc_offset(tb_last_jiffy);
			timer_check_rtc();
		}
		write_sequnlock(&xtime_lock);
	}

	next_dec = tb_ticks_per_jiffy - ticks;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
		process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
	set_irq_regs(old_regs);
}
void wakeup_decrementer(void)
{
	unsigned long ticks;

	/*
	 * The timebase gets saved on sleep and restored on wakeup,
	 * so all we need to do is to reset the decrementer.
	 */
	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
	if (ticks < tb_ticks_per_jiffy)
		ticks = tb_ticks_per_jiffy - ticks;
	else
		ticks = 1;
	set_dec(ticks);
}
#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
	previous_tb -= tb_ticks_per_jiffy;

	for_each_possible_cpu(i) {
		if (i == boot_cpuid)
			continue;
		per_cpu(last_jiffy, i) = previous_tb;
	}
}
#endif
/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
}
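
/*
 * Worked example (illustrative numbers): with tb_ticks_per_sec =
 * 512000000, time_init() computes 1e9 * 2^64 / 512000000, which is
 * about 1.953 * 2^64.  That does not fit in 64 bits, so it is halved
 * once: tb_to_ns_scale ~= 0.9766 * 2^64 with tb_to_ns_shift = 1.
 * mulhdu(tb, tb_to_ns_scale) then yields tb * 0.9766, and the final
 * shift left by 1 restores the full tb * 1.953 ns per tick.
 */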
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	u64 new_xsec;
	unsigned long tb_delta;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/*
	 * Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
	 * is cleared.  Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Subtract off the number of nanoseconds since the
	 * beginning of the last tick.
	 */
	tb_delta = tb_ticks_since(tb_last_jiffy);
	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	ntp_clear();

	new_xsec = xtime.tv_nsec;
	if (new_xsec != 0) {
		new_xsec *= XSEC_PER_SEC;
		do_div(new_xsec, NSEC_PER_SEC);
	}
	new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;

	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}
void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}

#ifdef CONFIG_BOOKE
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
}
unsigned long get_boot_time(void)
{
	struct rtc_time tm;

	if (ppc_md.get_boot_time)
		return ppc_md.get_boot_time();
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	unsigned long tm = 0;
	struct div_result res;
	u64 scale, x;
	unsigned shift;

	if (ppc_md.time_init != NULL)
		timezone_offset = ppc_md.time_init();

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_jiffy = get_rtcl();
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_jiffy = get_tb();
	}
	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	calc_cputime_factors();

	/*
	 * Calculate the length of each tick in ns.  It will not be
	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
	 * rounded up.
	 */
	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
	do_div(x, ppc_tb_freq);
	tick_nsec = x;
	last_tick_len = x << TICKLEN_SCALE;
	/*
	 * Compute ticklen_to_xs, which is a factor which gets multiplied
	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
	 * It is computed as:
	 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 * which turns out to be N = 51 - SHIFT_HZ.
	 * This gives the result as a 0.64 fixed-point fraction.
	 * That value is reduced by an offset amounting to 1 xsec per
	 * 2^31 timebase ticks to avoid problems with time going backwards
	 * by 1 xsec when we do timer_recalc_offset due to losing the
	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
	 * since there are 2^20 xsec in a second.
	 */
	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
	ticklen_to_xs = res.result_low;

	/* Compute tb_to_xs from tick_nsec */
	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	tm = get_boot_time();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
		tm -= timezone_offset;
	}

	xtime.tv_sec = tm;
	xtime.tv_nsec = 0;
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_jiffy;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;

	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	last_rtc_update = xtime.tv_sec;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}
#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)		(leapyear(a) ? 366 : 365)
#define days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}
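
/*
 * Sanity check (a worked example, not extra logic): for 1970-01-01,
 * lastYear = 1969, leapsToDate = 492 - 19 + 4 = 477 and
 * day = 0 + 1969*365 + 477 + 0 + 1 = 719163; 719163 % 7 = 4, i.e.
 * Thursday in the 0 = Sunday convention, which is indeed the day
 * 1 Jan 1970 fell on.
 */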
void to_tm(int tim, struct rtc_time * tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}
/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency giving resolution of a few tens of nanoseconds is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured but does not harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
	unsigned mlt=0, tmp, err;
	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */

	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt|tmp) < outscale)
			mlt |= tmp;
	}

	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */

	err = inscale * (mlt+1);
	if (err <= inscale/2)
		mlt++;
	return mlt;
}
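
/*
 * Example use (from time_init() above): tb_to_us =
 * mulhwu_scale_factor(ppc_tb_freq, 1000000) finds, bit by bit, the
 * largest 32-bit mlt with mulhwu(inscale, mlt) < outscale, then
 * corrects by one, so that mulhwu(ticks, tb_to_us) ~=
 * ticks * 1000000 / ppc_tb_freq, i.e. a tick count converts to
 * microseconds with a single high multiply.  Illustrative numbers:
 * with ppc_tb_freq = 512000000, the factor is
 * (1000000/512000000) * 2^32 = 2^23 exactly, and
 * mulhwu(512000000, 2^23) = 512000000 >> 9 = 1000000.
 */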
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
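
/*
 * Worked example (illustrative): this is ordinary long division in
 * base 2^32, one 32-bit "digit" (a, b, c, d) at a time.  Dividing the
 * 128-bit value 5 * 2^64 by divisor = 2 gives digits w = 0, then
 * 5/2 = 2 remainder 1, so x = 2 and the remainder 1 is carried into
 * the next digit: (1 << 32)/2 gives y = 0x80000000 and z = 0, i.e.
 * the result 2.5 * 2^64, as expected.  do_div() divides in place and
 * returns the remainder, which is how each step's remainder becomes
 * the high half of the next step's dividend.
 */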