blob: 88fc30d2f5fd9b59379389fbddc194dd7ad26f9e [file] [log] [blame]
Paul Mundt253b0882009-05-13 17:38:11 +09001#include <linux/clk.h>
2#include <linux/compiler.h>
Magnus Damma1153e22009-05-28 13:11:31 +00003#include <linux/bootmem.h>
Magnus Damm6881e8b2009-05-28 12:52:29 +00004#include <linux/io.h>
Paul Mundt253b0882009-05-13 17:38:11 +09005#include <asm/clock.h>
6
Magnus Damm6881e8b2009-05-28 12:52:29 +00007static int sh_clk_mstp32_enable(struct clk *clk)
8{
9 __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
10 clk->enable_reg);
11 return 0;
12}
13
14static void sh_clk_mstp32_disable(struct clk *clk)
15{
16 __raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
17 clk->enable_reg);
18}
19
/*
 * Ops for 32-bit MSTP (module stop) gate clocks; rate simply follows
 * the parent, only gating is controlled here.
 */
static struct clk_ops sh_clk_mstp32_clk_ops = {
	.enable		= sh_clk_mstp32_enable,
	.disable	= sh_clk_mstp32_disable,
	.recalc		= followparent_recalc,
};
25
26int __init sh_clk_mstp32_register(struct clk *clks, int nr)
27{
28 struct clk *clkp;
29 int ret = 0;
30 int k;
31
32 for (k = 0; !ret && (k < nr); k++) {
33 clkp = clks + k;
34 clkp->ops = &sh_clk_mstp32_clk_ops;
35 ret |= clk_register(clkp);
36 }
37
38 return ret;
39}
40
Magnus Damma1153e22009-05-28 13:11:31 +000041static unsigned long sh_clk_div4_recalc(struct clk *clk)
42{
43 struct clk_div_mult_table *table = clk->priv;
44 unsigned int idx;
45
46 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
47 table, &clk->arch_flags);
48
49 idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
50
51 return clk->freq_table[idx].frequency;
52}
53
/* Round a requested rate to the nearest entry in the clock's frequency table. */
static long sh_clk_div4_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
58
/* Ops for 4-bit divisor (div4) clocks; no enable/disable, rate only. */
static struct clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.round_rate	= sh_clk_div4_round_rate,
};
63
64int __init sh_clk_div4_register(struct clk *clks, int nr,
65 struct clk_div_mult_table *table)
66{
67 struct clk *clkp;
68 void *freq_table;
69 int nr_divs = table->nr_divisors;
70 int freq_table_size = sizeof(struct cpufreq_frequency_table);
71 int ret = 0;
72 int k;
73
74 k = nr_divs + 1;
75 freq_table = alloc_bootmem(freq_table_size * nr * (nr_divs + 1));
76 if (!freq_table)
77 return -ENOMEM;
78
79 for (k = 0; !ret && (k < nr); k++) {
80 clkp = clks + k;
81
82 clkp->ops = &sh_clk_div4_clk_ops;
83 clkp->id = -1;
84 clkp->priv = table;
85
86 clkp->freq_table = freq_table + (k * freq_table_size);
87 clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
88
89 ret = clk_register(clkp);
90 }
91
92 return ret;
93}
94
Paul Mundt36aa1e32009-05-22 14:00:34 +090095#ifdef CONFIG_SH_CLK_CPG_LEGACY
/* Root clock; rate comes from the board's configured peripheral clock freq. */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ENABLE_ON_INIT,
	.rate		= CONFIG_SH_PCLK_FREQ,
};
101
/* Peripheral module clock, derived from master_clk. */
static struct clk peripheral_clk = {
	.name		= "peripheral_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};
107
/* Bus clock, derived from master_clk. */
static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};
113
/* CPU core clock, derived from master_clk. */
static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};
119
/*
 * The ordering of these clocks matters, do not change it.
 * (cpg_clk_init() passes the array index to arch_init_clk_ops(),
 * so each clock's position is part of the contract.)
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&peripheral_clk,
	&bus_clk,
	&cpu_clk,
};
129
130int __init __deprecated cpg_clk_init(void)
131{
132 int i, ret = 0;
133
134 for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
135 struct clk *clk = onchip_clocks[i];
136 arch_init_clk_ops(&clk->ops, i);
137 if (clk->ops)
138 ret |= clk_register(clk);
139 }
140
141 return ret;
142}
143
/*
 * Placeholder for compatibility, until the lazy CPUs do this
 * on their own.  Weak so a CPU variant can override it with its
 * own clock initialization.
 */
int __init __weak arch_clk_init(void)
{
	return cpg_clk_init();
}
#endif /* CONFIG_SH_CLK_CPG_LEGACY */