sh: clkfwk: Rework legacy CPG clock handling.
[safe/jmp/linux-2.6] / arch / sh / kernel / cpu / clock.c
1 /*
2  * arch/sh/kernel/cpu/clock.c - SuperH clock framework
3  *
4  *  Copyright (C) 2005 - 2009  Paul Mundt
5  *
6  * This clock framework is derived from the OMAP version by:
7  *
8  *      Copyright (C) 2004 - 2008 Nokia Corporation
9  *      Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
10  *
11  *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
12  *
13  *  With clkdev bits:
14  *
15  *      Copyright (C) 2008 Russell King.
16  *
17  * This file is subject to the terms and conditions of the GNU General Public
18  * License.  See the file "COPYING" in the main directory of this archive
19  * for more details.
20  */
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/module.h>
24 #include <linux/mutex.h>
25 #include <linux/list.h>
26 #include <linux/kobject.h>
27 #include <linux/sysdev.h>
28 #include <linux/seq_file.h>
29 #include <linux/err.h>
30 #include <linux/platform_device.h>
31 #include <linux/proc_fs.h>
32 #include <asm/clock.h>
33 #include <asm/machvec.h>
34
/* All registered clocks; list_add() in clk_register() prepends, so the
 * list head holds the most recently registered clock. */
static LIST_HEAD(clock_list);
/* Protects clock rate/usecount updates; always taken IRQ-safe. */
static DEFINE_SPINLOCK(clock_lock);
/* Serializes topology changes to and lookups over clock_list. */
static DEFINE_MUTEX(clock_list_sem);
38
39 /* Used for clocks that always have same value as the parent clock */
40 unsigned long followparent_recalc(struct clk *clk)
41 {
42         return clk->parent->rate;
43 }
44
45 int clk_reparent(struct clk *child, struct clk *parent)
46 {
47         list_del_init(&child->sibling);
48         if (parent)
49                 list_add(&child->sibling, &parent->children);
50         child->parent = parent;
51
52         /* now do the debugfs renaming to reattach the child
53            to the proper parent */
54
55         return 0;
56 }
57
58 /* Propagate rate to children */
59 void propagate_rate(struct clk *tclk)
60 {
61         struct clk *clkp;
62
63         list_for_each_entry(clkp, &tclk->children, sibling) {
64                 if (clkp->ops && clkp->ops->recalc)
65                         clkp->rate = clkp->ops->recalc(clkp);
66                 propagate_rate(clkp);
67         }
68 }
69
70 static void __clk_disable(struct clk *clk)
71 {
72         if (clk->usecount == 0) {
73                 printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
74                        clk->name);
75                 WARN_ON(1);
76                 return;
77         }
78
79         if (!(--clk->usecount)) {
80                 if (likely(clk->ops && clk->ops->disable))
81                         clk->ops->disable(clk);
82                 if (likely(clk->parent))
83                         __clk_disable(clk->parent);
84         }
85 }
86
87 void clk_disable(struct clk *clk)
88 {
89         unsigned long flags;
90
91         if (!clk)
92                 return;
93
94         spin_lock_irqsave(&clock_lock, flags);
95         __clk_disable(clk);
96         spin_unlock_irqrestore(&clock_lock, flags);
97 }
98 EXPORT_SYMBOL_GPL(clk_disable);
99
100 static int __clk_enable(struct clk *clk)
101 {
102         int ret = 0;
103
104         if (clk->usecount++ == 0) {
105                 if (clk->parent) {
106                         ret = __clk_enable(clk->parent);
107                         if (unlikely(ret))
108                                 goto err;
109                 }
110
111                 if (clk->ops && clk->ops->enable) {
112                         ret = clk->ops->enable(clk);
113                         if (ret) {
114                                 if (clk->parent)
115                                         __clk_disable(clk->parent);
116                                 goto err;
117                         }
118                 }
119         }
120
121         return ret;
122 err:
123         clk->usecount--;
124         return ret;
125 }
126
127 int clk_enable(struct clk *clk)
128 {
129         unsigned long flags;
130         int ret;
131
132         if (!clk)
133                 return -EINVAL;
134
135         spin_lock_irqsave(&clock_lock, flags);
136         ret = __clk_enable(clk);
137         spin_unlock_irqrestore(&clock_lock, flags);
138
139         return ret;
140 }
141 EXPORT_SYMBOL_GPL(clk_enable);
142
143 static LIST_HEAD(root_clks);
144
145 /**
146  * recalculate_root_clocks - recalculate and propagate all root clocks
147  *
148  * Recalculates all root clocks (clocks with no parent), which if the
149  * clock's .recalc is set correctly, should also propagate their rates.
150  * Called at init.
151  */
152 void recalculate_root_clocks(void)
153 {
154         struct clk *clkp;
155
156         list_for_each_entry(clkp, &root_clks, sibling) {
157                 if (clkp->ops && clkp->ops->recalc)
158                         clkp->rate = clkp->ops->recalc(clkp);
159                 propagate_rate(clkp);
160         }
161 }
162
163 int clk_register(struct clk *clk)
164 {
165         if (clk == NULL || IS_ERR(clk))
166                 return -EINVAL;
167
168         /*
169          * trap out already registered clocks
170          */
171         if (clk->node.next || clk->node.prev)
172                 return 0;
173
174         mutex_lock(&clock_list_sem);
175
176         INIT_LIST_HEAD(&clk->children);
177         clk->usecount = 0;
178
179         if (clk->parent)
180                 list_add(&clk->sibling, &clk->parent->children);
181         else
182                 list_add(&clk->sibling, &root_clks);
183
184         list_add(&clk->node, &clock_list);
185         if (clk->ops && clk->ops->init)
186                 clk->ops->init(clk);
187         mutex_unlock(&clock_list_sem);
188
189         return 0;
190 }
191 EXPORT_SYMBOL_GPL(clk_register);
192
193 void clk_unregister(struct clk *clk)
194 {
195         mutex_lock(&clock_list_sem);
196         list_del(&clk->sibling);
197         list_del(&clk->node);
198         mutex_unlock(&clock_list_sem);
199 }
200 EXPORT_SYMBOL_GPL(clk_unregister);
201
202 static void clk_enable_init_clocks(void)
203 {
204         struct clk *clkp;
205
206         list_for_each_entry(clkp, &clock_list, node)
207                 if (clkp->flags & CLK_ENABLE_ON_INIT)
208                         clk_enable(clkp);
209 }
210
/*
 * clk_get_rate - return the cached rate of @clk
 *
 * Simply reads clk->rate; no recalculation or hardware access is done
 * here (rates are refreshed by set_rate/propagate_rate paths).
 */
unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
216
/* clk_set_rate - set @clk to @rate; thin wrapper around clk_set_rate_ex()
 * with algo_id 0. */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);
222
223 int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
224 {
225         int ret = -EOPNOTSUPP;
226         unsigned long flags;
227
228         spin_lock_irqsave(&clock_lock, flags);
229
230         if (likely(clk->ops && clk->ops->set_rate)) {
231                 ret = clk->ops->set_rate(clk, rate, algo_id);
232                 if (ret != 0)
233                         goto out_unlock;
234         } else {
235                 clk->rate = rate;
236                 ret = 0;
237         }
238
239         if (clk->ops && clk->ops->recalc)
240                 clk->rate = clk->ops->recalc(clk);
241
242         propagate_rate(clk);
243
244 out_unlock:
245         spin_unlock_irqrestore(&clock_lock, flags);
246
247         return ret;
248 }
249 EXPORT_SYMBOL_GPL(clk_set_rate_ex);
250
251 int clk_set_parent(struct clk *clk, struct clk *parent)
252 {
253         unsigned long flags;
254         int ret = -EINVAL;
255
256         if (!parent || !clk)
257                 return ret;
258         if (clk->parent == parent)
259                 return 0;
260
261         spin_lock_irqsave(&clock_lock, flags);
262         if (clk->usecount == 0) {
263                 if (clk->ops->set_parent)
264                         ret = clk->ops->set_parent(clk, parent);
265                 else
266                         ret = clk_reparent(clk, parent);
267
268                 if (ret == 0) {
269                         pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
270                                  clk->name, clk->parent->name, clk->rate);
271                         if (clk->ops->recalc)
272                                 clk->rate = clk->ops->recalc(clk);
273                         propagate_rate(clk);
274                 }
275         } else
276                 ret = -EBUSY;
277         spin_unlock_irqrestore(&clock_lock, flags);
278
279         return ret;
280 }
281 EXPORT_SYMBOL_GPL(clk_set_parent);
282
/* clk_get_parent - return @clk's parent clock (NULL for a root clock). */
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
288
289 long clk_round_rate(struct clk *clk, unsigned long rate)
290 {
291         if (likely(clk->ops && clk->ops->round_rate)) {
292                 unsigned long flags, rounded;
293
294                 spin_lock_irqsave(&clock_lock, flags);
295                 rounded = clk->ops->round_rate(clk, rate);
296                 spin_unlock_irqrestore(&clock_lock, flags);
297
298                 return rounded;
299         }
300
301         return clk_get_rate(clk);
302 }
303 EXPORT_SYMBOL_GPL(clk_round_rate);
304
/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match
 *  If an entry has a connection ID, it must match
 * Then we take the most specific entry - with the following
 * order of precedence: dev+con > dev only > con only.
 */
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
	/*
	 * NOTE(review): clock_list is populated with struct clk nodes in
	 * clk_register(), yet is walked here as struct clk_lookup entries.
	 * Presumably struct clk overlays/embeds the clkdev lookup fields
	 * at matching offsets - confirm against asm/clock.h.
	 */
	struct clk_lookup *p;
	struct clk *clk = NULL;
	int match, best = 0;

	list_for_each_entry(p, &clock_list, node) {
		match = 0;
		if (p->dev_id) {
			if (!dev_id || strcmp(p->dev_id, dev_id))
				continue;
			match += 2;	/* device match outranks connection match */
		}
		if (p->con_id) {
			if (!con_id || strcmp(p->con_id, con_id))
				continue;
			match += 1;
		}
		if (match == 0)
			continue;

		/* Keep the highest-scoring (most specific) entry seen so far. */
		if (match > best) {
			clk = p->clk;
			best = match;
		}
	}
	return clk;
}
342
343 struct clk *clk_get_sys(const char *dev_id, const char *con_id)
344 {
345         struct clk *clk;
346
347         mutex_lock(&clock_list_sem);
348         clk = clk_find(dev_id, con_id);
349         mutex_unlock(&clock_list_sem);
350
351         return clk ? clk : ERR_PTR(-ENOENT);
352 }
353 EXPORT_SYMBOL_GPL(clk_get_sys);
354
355 /*
356  * Returns a clock. Note that we first try to use device id on the bus
357  * and clock name. If this fails, we try to use clock name only.
358  */
359 struct clk *clk_get(struct device *dev, const char *id)
360 {
361         const char *dev_id = dev ? dev_name(dev) : NULL;
362         struct clk *p, *clk = ERR_PTR(-ENOENT);
363         int idno;
364
365         clk = clk_get_sys(dev_id, id);
366         if (clk && !IS_ERR(clk))
367                 return clk;
368
369         if (dev == NULL || dev->bus != &platform_bus_type)
370                 idno = -1;
371         else
372                 idno = to_platform_device(dev)->id;
373
374         mutex_lock(&clock_list_sem);
375         list_for_each_entry(p, &clock_list, node) {
376                 if (p->id == idno &&
377                     strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
378                         clk = p;
379                         goto found;
380                 }
381         }
382
383         list_for_each_entry(p, &clock_list, node) {
384                 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
385                         clk = p;
386                         break;
387                 }
388         }
389
390 found:
391         mutex_unlock(&clock_list_sem);
392
393         return clk;
394 }
395 EXPORT_SYMBOL_GPL(clk_get);
396
397 void clk_put(struct clk *clk)
398 {
399         if (clk && !IS_ERR(clk))
400                 module_put(clk->owner);
401 }
402 EXPORT_SYMBOL_GPL(clk_put);
403
404
405 static int show_clocks(char *buf, char **start, off_t off,
406                        int len, int *eof, void *data)
407 {
408         struct clk *clk;
409         char *p = buf;
410
411         list_for_each_entry_reverse(clk, &clock_list, node) {
412                 unsigned long rate = clk_get_rate(clk);
413
414                 p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
415                              rate / 1000000, (rate % 1000000) / 10000,
416                               (clk->usecount > 0) ?  "enabled" : "disabled");
417         }
418
419         return p - buf;
420 }
421
422 #ifdef CONFIG_PM
/*
 * Sysdev suspend hook; also (ab)used as the resume path - see
 * clks_sysdev_resume(), which calls this with PMSG_ON.
 */
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	/* Remembered across calls so a hibernation resume (FREEZE -> ON)
	 * can be told apart from any other ON transition. */
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		/* Re-program each clock's parent and rate from the values
		 * cached in the (restored) clk structures.
		 * NOTE(review): NO_CHANGE presumably selects "keep current
		 * divider algorithm" for set_rate - confirm in asm/clock.h. */
		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}
458
/* Resume hook: funnel into the suspend handler with a PMSG_ON event so
 * the hibernation-restore logic there runs. */
static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}
463
/* Legacy sysdev glue: a "clks" class with one device whose driver hooks
 * the clock framework into system suspend/resume. */
static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};
476
477 static int __init clk_sysdev_init(void)
478 {
479         sysdev_class_register(&clks_sysdev_class);
480         sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
481         sysdev_register(&clks_sysdev_dev);
482
483         return 0;
484 }
485 subsys_initcall(clk_sysdev_init);
486 #endif
487
488 int __init clk_init(void)
489 {
490         int ret;
491
492         ret = arch_clk_init();
493         if (unlikely(ret)) {
494                 pr_err("%s: CPU clock registration failed.\n", __func__);
495                 return ret;
496         }
497
498         if (sh_mv.mv_clk_init) {
499                 ret = sh_mv.mv_clk_init();
500                 if (unlikely(ret)) {
501                         pr_err("%s: machvec clock initialization failed.\n",
502                                __func__);
503                         return ret;
504                 }
505         }
506
507         /* Kick the child clocks.. */
508         recalculate_root_clocks();
509
510         /* Enable the necessary init clocks */
511         clk_enable_init_clocks();
512
513         return ret;
514 }
515
516 static int __init clk_proc_init(void)
517 {
518         struct proc_dir_entry *p;
519         p = create_proc_read_entry("clocks", S_IRUSR, NULL,
520                                    show_clocks, NULL);
521         if (unlikely(!p))
522                 return -EINVAL;
523
524         return 0;
525 }
526 subsys_initcall(clk_proc_init);