#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <asm/clock.h>

/* MSTP bits are inverted: a cleared bit means the module clock runs */
static int sh_clk_mstp32_enable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
		     clk->enable_reg);
	return 0;
}

static void sh_clk_mstp32_disable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
		     clk->enable_reg);
}

static struct clk_ops sh_clk_mstp32_clk_ops = {
	.enable = sh_clk_mstp32_enable,
	.disable = sh_clk_mstp32_disable,
	.recalc = followparent_recalc,
};

int __init sh_clk_mstp32_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int k, ret = 0;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp32_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}

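/*
 * Example usage (an illustrative sketch, not part of this file): a CPU
 * or board setup file would typically describe its MSTP gates as an
 * array of struct clk and register them in one go. The clock name,
 * parent, register address and bit number below are hypothetical.
 *
 *	static struct clk mstp_clks[] = {
 *		{
 *			.name		= "scif0",
 *			.parent		= &peripheral_clk,
 *			.enable_reg	= (void __iomem *)0xa4150030,
 *			.enable_bit	= 7,
 *		},
 *	};
 *
 *	ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */
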
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table sh_clk_div6_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static unsigned long sh_clk_div6_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	idx = __raw_readl(clk->enable_reg) & 0x003f;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div6_set_rate(struct clk *clk,
				unsigned long rate, int algo_id)
{
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = __raw_readl(clk->enable_reg);
	value &= ~0x3f;
	value |= idx;
	__raw_writel(value, clk->enable_reg);
	return 0;
}

static int sh_clk_div6_enable(struct clk *clk)
{
	unsigned long value;
	int ret;

	ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
	if (ret == 0) {
		value = __raw_readl(clk->enable_reg);
		value &= ~0x100; /* clear stop bit to enable clock */
		__raw_writel(value, clk->enable_reg);
	}
	return ret;
}

static void sh_clk_div6_disable(struct clk *clk)
{
	unsigned long value;

	value = __raw_readl(clk->enable_reg);
	value |= 0x100; /* stop clock */
	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
	__raw_writel(value, clk->enable_reg);
}

static struct clk_ops sh_clk_div6_clk_ops = {
	.recalc = sh_clk_div6_recalc,
	.round_rate = sh_clk_div_round_rate,
	.set_rate = sh_clk_div6_set_rate,
	.enable = sh_clk_div6_enable,
	.disable = sh_clk_div6_disable,
};

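/*
 * With these ops wired up, the generic clock API resolves to the helpers
 * above; for example (hypothetical clock name, error handling omitted):
 *
 *	struct clk *clk = clk_get(NULL, "vclk");
 *
 *	clk_set_rate(clk, clk_round_rate(clk, 48000000));
 *	clk_enable(clk);	// ends up in sh_clk_div6_enable()
 *	clk_put(clk);
 */
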
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = sh_clk_div6_table.nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int k, ret = 0;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div6_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = &sh_clk_div6_clk_ops;
		clkp->id = -1;
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}

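/*
 * Illustrative sketch (hypothetical names and register address): a div6
 * clock needs only a parent and the address of its control register; the
 * low six bits hold (divisor - 1) and bit 8 is the stop bit.
 *
 *	static struct clk div6_clks[] = {
 *		{
 *			.name		= "vclk",
 *			.parent		= &pll_clk,
 *			.enable_reg	= (void __iomem *)0xa4150004,
 *		},
 *	};
 *
 *	sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
 */
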
static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk->priv;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk->priv;
	u32 value;
	int ret;

	/* bit 7 selects the parent: cleared for pll_clk, set otherwise */
	if (!strcmp("pll_clk", parent->name))
		value = __raw_readl(clk->enable_reg) & ~(1 << 7);
	else
		value = __raw_readl(clk->enable_reg) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	__raw_writel(value, clk->enable_reg);

	/* Rebuild the frequency table to match the new parent */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
{
	unsigned long value;
	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = __raw_readl(clk->enable_reg);
	value &= ~(0xf << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	__raw_writel(value, clk->enable_reg);

	return 0;
}

static int sh_clk_div4_enable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
	return 0;
}

static void sh_clk_div4_disable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
}

/* div4 clocks that are always running only need the rate operations */
static struct clk_ops sh_clk_div4_clk_ops = {
	.recalc = sh_clk_div4_recalc,
	.set_rate = sh_clk_div4_set_rate,
	.round_rate = sh_clk_div_round_rate,
};

/* variant with a gate (stop bit 8) in the same register */
static struct clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc = sh_clk_div4_recalc,
	.set_rate = sh_clk_div4_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div4_enable,
	.disable = sh_clk_div4_disable,
};

/* variant that can additionally switch between parent clocks */
static struct clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc = sh_clk_div4_recalc,
	.set_rate = sh_clk_div4_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div4_enable,
	.disable = sh_clk_div4_disable,
	.set_parent = sh_clk_div4_set_parent,
};

static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
			struct clk_div_mult_table *table, struct clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int k, ret = 0;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div4_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->id = -1;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div_mult_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div_mult_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div_mult_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}

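/*
 * Illustrative sketch: div4 callers supply their own divisor table since
 * the valid ratios differ per SoC; enable_bit is the bit position of the
 * 4-bit divider field. All names and values below are hypothetical.
 *
 *	static int div4_divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18 };
 *
 *	static struct clk_div_mult_table div4_table = {
 *		.divisors = div4_divisors,
 *		.nr_divisors = ARRAY_SIZE(div4_divisors),
 *	};
 *
 *	static struct clk div4_clks[] = {
 *		{
 *			.name		= "sdhi_clk",
 *			.parent		= &pll_clk,
 *			.enable_reg	= (void __iomem *)0xa4150020,
 *			.enable_bit	= 0,
 *		},
 *	};
 *
 *	sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &div4_table);
 */
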
#ifdef CONFIG_SH_CLK_CPG_LEGACY
static struct clk master_clk = {
	.name = "master_clk",
	.flags = CLK_ENABLE_ON_INIT,
	.rate = CONFIG_SH_PCLK_FREQ,
};

static struct clk peripheral_clk = {
	.name = "peripheral_clk",
	.parent = &master_clk,
	.flags = CLK_ENABLE_ON_INIT,
};

static struct clk bus_clk = {
	.name = "bus_clk",
	.parent = &master_clk,
	.flags = CLK_ENABLE_ON_INIT,
};

static struct clk cpu_clk = {
	.name = "cpu_clk",
	.parent = &master_clk,
	.flags = CLK_ENABLE_ON_INIT,
};

/*
 * The ordering of these clocks matters, do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&peripheral_clk,
	&bus_clk,
	&cpu_clk,
};

int __init __deprecated cpg_clk_init(void)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];
		arch_init_clk_ops(&clk->ops, i);
		if (clk->ops)
			ret |= clk_register(clk);
	}

	return ret;
}

/*
 * Placeholder for compatibility, until the lazy CPUs do this
 * on their own.
 */
int __init __weak arch_clk_init(void)
{
	return cpg_clk_init();
}
#endif /* CONFIG_SH_CLK_CPG_LEGACY */