#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/bootmem.h>
#include <linux/io.h>
#include <asm/clock.h>

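/*
 * MSTP32 gate clocks are controlled through 32-bit module stop registers.
 * The stop bits are active high: clearing a clock's enable_bit supplies the
 * module with its clock, setting it stops the module again. The rate simply
 * follows the parent clock.
 */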
static int sh_clk_mstp32_enable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
		     clk->enable_reg);
	return 0;
}

static void sh_clk_mstp32_disable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
		     clk->enable_reg);
}

static struct clk_ops sh_clk_mstp32_clk_ops = {
	.enable = sh_clk_mstp32_enable,
	.disable = sh_clk_mstp32_disable,
	.recalc = followparent_recalc,
};

int __init sh_clk_mstp32_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp32_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}
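
/*
 * Illustrative sketch only (the register name, bit number and clock name
 * below are made up): a CPU-specific setup file would typically declare its
 * gate clocks as an array of struct clk entries pointing at the relevant
 * module stop register and bit, then hand the whole array to
 * sh_clk_mstp32_register(), e.g.
 *
 *	static struct clk mstp_clks[] = {
 *		{ .name = "tmu0", .parent = &peripheral_clk,
 *		  .enable_reg = (void __iomem *)MSTPCR0, .enable_bit = 12, },
 *	};
 *
 *	ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */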
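/*
 * DIV4 clocks derive their rate from the parent through a 4-bit divisor
 * field. recalc rebuilds the clock's frequency table from the CPU-supplied
 * clk_div_mult_table and then returns the entry selected by the divisor
 * field currently programmed at enable_bit in enable_reg.
 */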
static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk->priv;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;

	return clk->freq_table[idx].frequency;
}

static long sh_clk_div4_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

static struct clk_ops sh_clk_div4_clk_ops = {
	.recalc = sh_clk_div4_recalc,
	.round_rate = sh_clk_div4_round_rate,
};

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div_mult_table *table)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);

	freq_table = alloc_bootmem(freq_table_size * nr);
	if (!freq_table)
		return -ENOMEM;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = &sh_clk_div4_clk_ops;
		clkp->id = -1;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}
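
/*
 * Illustrative sketch only (the divisor values and array names below are
 * made up): a CPU would typically describe the ratios supported by its
 * frequency control register with a clk_div_mult_table and register all
 * affected clocks in one call, e.g.
 *
 *	static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 24 };
 *
 *	static struct clk_div_mult_table div4_table = {
 *		.divisors = divisors,
 *		.nr_divisors = ARRAY_SIZE(divisors),
 *	};
 *
 *	ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
 *				   &div4_table);
 */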
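/*
 * Legacy CPG support: CPUs that do not yet register their clocks themselves
 * get a fixed set of four clocks (master, peripheral, bus and CPU), all
 * rooted at a master clock running at the board-configured
 * CONFIG_SH_PCLK_FREQ.
 */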
#ifdef CONFIG_SH_CLK_CPG_LEGACY
static struct clk master_clk = {
	.name = "master_clk",
	.flags = CLK_ENABLE_ON_INIT,
	.rate = CONFIG_SH_PCLK_FREQ,
};

static struct clk peripheral_clk = {
	.name = "peripheral_clk",
	.parent = &master_clk,
	.flags = CLK_ENABLE_ON_INIT,
};

static struct clk bus_clk = {
	.name = "bus_clk",
	.parent = &master_clk,
	.flags = CLK_ENABLE_ON_INIT,
};

static struct clk cpu_clk = {
	.name = "cpu_clk",
	.parent = &master_clk,
	.flags = CLK_ENABLE_ON_INIT,
};

/*
 * The ordering of these clocks matters, do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&peripheral_clk,
	&bus_clk,
	&cpu_clk,
};

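/*
 * Register the legacy clocks. The CPU-specific arch_init_clk_ops() is
 * expected to fill in a struct clk_ops for each entry, keyed by its index
 * in onchip_clocks[]; entries left without ops are skipped.
 */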
int __init __deprecated cpg_clk_init(void)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];
		arch_init_clk_ops(&clk->ops, i);
		if (clk->ops)
			ret |= clk_register(clk);
	}

	return ret;
}

/*
 * Placeholder for compatibility, until the lazy CPUs do this
 * on their own.
 */
int __init __weak arch_clk_init(void)
{
	return cpg_clk_init();
}
#endif /* CONFIG_SH_CLK_CPG_LEGACY */