drivers/clk/imx/clk-pll14xx.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017-2018 NXP.
 */

#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include "clk.h"

#define GNRL_CTL        0x0
#define DIV_CTL         0x4
#define LOCK_STATUS     BIT(31)
#define LOCK_SEL_MASK   BIT(29)
#define CLKE_MASK       BIT(11)
#define RST_MASK        BIT(9)
#define BYPASS_MASK     BIT(4)
#define MDIV_SHIFT      12
#define MDIV_MASK       GENMASK(21, 12)
#define PDIV_SHIFT      4
#define PDIV_MASK       GENMASK(9, 4)
#define SDIV_SHIFT      0
#define SDIV_MASK       GENMASK(2, 0)
#define KDIV_SHIFT      0
#define KDIV_MASK       GENMASK(15, 0)

#define LOCK_TIMEOUT_US         10000

struct clk_pll14xx {
        struct clk_hw                   hw;
        void __iomem                    *base;
        enum imx_pll14xx_type           type;
        const struct imx_pll14xx_rate_table *rate_table;
        int rate_count;
};

#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)

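/* Find the exact-match entry for @rate in the PLL's rate table, if any. */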
static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
                struct clk_pll14xx *pll, unsigned long rate)
{
        const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
        int i;

        for (i = 0; i < pll->rate_count; i++)
                if (rate == rate_table[i].rate)
                        return &rate_table[i];

        return NULL;
}

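/*
 * Return the highest table rate that does not exceed the requested rate,
 * falling back to the smallest supported rate.
 */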
static long clk_pll14xx_round_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long *prate)
{
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
        const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
        int i;

        /* Assuming rate_table is in descending order */
        for (i = 0; i < pll->rate_count; i++)
                if (rate >= rate_table[i].rate)
                        return rate_table[i].rate;

        /* return minimum supported value */
        return rate_table[i - 1].rate;
}

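/* Integer PLL (1416x): Fout = (mdiv * Fin) / (pdiv * 2^sdiv) */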
static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw,
                                                  unsigned long parent_rate)
{
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
        u32 mdiv, pdiv, sdiv, pll_div;
        u64 fvco = parent_rate;

        pll_div = readl_relaxed(pll->base + 4);
        mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
        pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
        sdiv = (pll_div & SDIV_MASK) >> SDIV_SHIFT;

        fvco *= mdiv;
        do_div(fvco, pdiv << sdiv);

        return fvco;
}

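/*
 * Fractional PLL (1443x): kdiv is a signed 16-bit value that adds a
 * fractional part of kdiv/65536 to mdiv.
 */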
static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
                                                  unsigned long parent_rate)
{
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
        u32 mdiv, pdiv, sdiv, pll_div_ctl0, pll_div_ctl1;
        short int kdiv;
        u64 fvco = parent_rate;

        pll_div_ctl0 = readl_relaxed(pll->base + 4);
        pll_div_ctl1 = readl_relaxed(pll->base + 8);
        mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
        pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
        sdiv = (pll_div_ctl0 & SDIV_MASK) >> SDIV_SHIFT;
        kdiv = pll_div_ctl1 & KDIV_MASK;

        /* fvco = (m * 65536 + k) * Fin / (p * 65536) */
        fvco *= (mdiv * 65536 + kdiv);
        pdiv *= 65536;

        do_div(fvco, pdiv << sdiv);

        return fvco;
}

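/*
 * Helpers to check whether a rate change touches the m/p (and k) dividers.
 * If only sdiv changes, the new rate can be programmed without resetting
 * and re-locking the PLL.
 */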
static inline bool clk_pll1416x_mp_change(const struct imx_pll14xx_rate_table *rate,
                                          u32 pll_div)
{
        u32 old_mdiv, old_pdiv;

        old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
        old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;

        return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
}

static inline bool clk_pll1443x_mpk_change(const struct imx_pll14xx_rate_table *rate,
                                          u32 pll_div_ctl0, u32 pll_div_ctl1)
{
        u32 old_mdiv, old_pdiv, old_kdiv;

        old_mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
        old_pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
        old_kdiv = (pll_div_ctl1 & KDIV_MASK) >> KDIV_SHIFT;

        return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
                rate->kdiv != old_kdiv;
}

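/* Poll GNRL_CTL until LOCK_STATUS is set or LOCK_TIMEOUT_US expires. */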
static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
{
        u32 val;

        return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0,
                        LOCK_TIMEOUT_US);
}

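/*
 * Reprogram the 1416x PLL: if m/p change, bypass and reset the PLL,
 * write the new dividers, wait out the spec'd delay, then release reset
 * and wait for lock before removing the bypass.
 */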
static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
                                 unsigned long prate)
{
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
        const struct imx_pll14xx_rate_table *rate;
        u32 tmp, div_val;
        int ret;

        rate = imx_get_pll_settings(pll, drate);
        if (!rate) {
                pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
                       drate, clk_hw_get_name(hw));
                return -EINVAL;
        }

        tmp = readl_relaxed(pll->base + 4);

        if (!clk_pll1416x_mp_change(rate, tmp)) {
                tmp &= ~SDIV_MASK;
                tmp |= rate->sdiv << SDIV_SHIFT;
                writel_relaxed(tmp, pll->base + 4);

                return 0;
        }

        /* Bypass clock and set lock to pll output lock */
        tmp = readl_relaxed(pll->base);
        tmp |= LOCK_SEL_MASK;
        writel_relaxed(tmp, pll->base);

        /* Enable RST */
        tmp &= ~RST_MASK;
        writel_relaxed(tmp, pll->base);

        /* Enable BYPASS */
        tmp |= BYPASS_MASK;
        writel(tmp, pll->base);

        div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
                (rate->sdiv << SDIV_SHIFT);
        writel_relaxed(div_val, pll->base + 0x4);

        /*
         * According to the spec, t3 - t2 needs to be greater than
         * both 1us and 1/FREF. FREF is FIN / Prediv; with prediv
         * in [1, 63], 3us is a safe choice.
         */
        udelay(3);

        /* Disable RST */
        tmp |= RST_MASK;
        writel_relaxed(tmp, pll->base);

        /* Wait Lock */
        ret = clk_pll14xx_wait_lock(pll);
        if (ret)
                return ret;

        /* Bypass */
        tmp &= ~BYPASS_MASK;
        writel_relaxed(tmp, pll->base);

        return 0;
}

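/* Same sequence as the 1416x path, plus programming of the fractional kdiv. */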
static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
                                 unsigned long prate)
{
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
        const struct imx_pll14xx_rate_table *rate;
        u32 tmp, div_val;
        int ret;

        rate = imx_get_pll_settings(pll, drate);
        if (!rate) {
                pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
                        drate, clk_hw_get_name(hw));
                return -EINVAL;
        }

        tmp = readl_relaxed(pll->base + 4);
        div_val = readl_relaxed(pll->base + 8);

        if (!clk_pll1443x_mpk_change(rate, tmp, div_val)) {
                tmp &= ~SDIV_MASK;
                tmp |= rate->sdiv << SDIV_SHIFT;
                writel_relaxed(tmp, pll->base + 4);

                return 0;
        }

        /* Enable RST */
        tmp = readl_relaxed(pll->base);
        tmp &= ~RST_MASK;
        writel_relaxed(tmp, pll->base);

        /* Enable BYPASS */
        tmp |= BYPASS_MASK;
        writel_relaxed(tmp, pll->base);

        div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
                (rate->sdiv << SDIV_SHIFT);
        writel_relaxed(div_val, pll->base + 0x4);
        writel_relaxed(rate->kdiv << KDIV_SHIFT, pll->base + 0x8);

        /*
         * According to the spec, t3 - t2 needs to be greater than
         * both 1us and 1/FREF. FREF is FIN / Prediv; with prediv
         * in [1, 63], 3us is a safe choice.
         */
        udelay(3);

        /* Disable RST */
        tmp |= RST_MASK;
        writel_relaxed(tmp, pll->base);

        /* Wait Lock */
        ret = clk_pll14xx_wait_lock(pll);
        if (ret)
                return ret;

        /* Bypass */
        tmp &= ~BYPASS_MASK;
        writel_relaxed(tmp, pll->base);

        return 0;
}

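/* Power up the PLL (if it is not already running) and wait for it to lock. */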
static int clk_pll14xx_prepare(struct clk_hw *hw)
{
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
        u32 val;
        int ret;

        /*
         * Setting RESETB from 0 to 1 starts normal PLL operation
         * after the lock time.
         */
        val = readl_relaxed(pll->base + GNRL_CTL);
        if (val & RST_MASK)
                return 0;
        val |= BYPASS_MASK;
        writel_relaxed(val, pll->base + GNRL_CTL);
        val |= RST_MASK;
        writel_relaxed(val, pll->base + GNRL_CTL);

        ret = clk_pll14xx_wait_lock(pll);
        if (ret)
                return ret;

        val &= ~BYPASS_MASK;
        writel_relaxed(val, pll->base + GNRL_CTL);

        return 0;
}

static int clk_pll14xx_is_prepared(struct clk_hw *hw)
{
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
        u32 val;

        val = readl_relaxed(pll->base + GNRL_CTL);

        return (val & RST_MASK) ? 1 : 0;
}

static void clk_pll14xx_unprepare(struct clk_hw *hw)
{
        struct clk_pll14xx *pll = to_clk_pll14xx(hw);
        u32 val;

        /*
         * Setting RST to 0 enables power-down mode and resets
         * every digital block.
         */
        val = readl_relaxed(pll->base + GNRL_CTL);
        val &= ~RST_MASK;
        writel_relaxed(val, pll->base + GNRL_CTL);
}

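/*
 * PLLs registered without a rate table get clk_pll1416x_min_ops and are
 * effectively read-only (recalc_rate only).
 */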
static const struct clk_ops clk_pll1416x_ops = {
        .prepare        = clk_pll14xx_prepare,
        .unprepare      = clk_pll14xx_unprepare,
        .is_prepared    = clk_pll14xx_is_prepared,
        .recalc_rate    = clk_pll1416x_recalc_rate,
        .round_rate     = clk_pll14xx_round_rate,
        .set_rate       = clk_pll1416x_set_rate,
};

static const struct clk_ops clk_pll1416x_min_ops = {
        .recalc_rate    = clk_pll1416x_recalc_rate,
};

static const struct clk_ops clk_pll1443x_ops = {
        .prepare        = clk_pll14xx_prepare,
        .unprepare      = clk_pll14xx_unprepare,
        .is_prepared    = clk_pll14xx_is_prepared,
        .recalc_rate    = clk_pll1443x_recalc_rate,
        .round_rate     = clk_pll14xx_round_rate,
        .set_rate       = clk_pll1443x_set_rate,
};

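/*
 * imx_clk_pll14xx - register a PLL1416x/PLL1443x clock
 * @name: clock name
 * @parent_name: name of the reference clock
 * @base: base address of the PLL register block
 * @pll_clk: PLL type, flags and rate table
 *
 * Returns a clk pointer on success or an ERR_PTR() on failure.
 *
 * Illustrative call from an SoC clock driver (names are illustrative only):
 *
 *      clk = imx_clk_pll14xx("audio_pll1", "osc_24m", anatop_base,
 *                            &imx_1443x_pll);
 */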
struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
                            void __iomem *base,
                            const struct imx_pll14xx_clk *pll_clk)
{
        struct clk_pll14xx *pll;
        struct clk *clk;
        struct clk_init_data init;
        u32 val;

        pll = kzalloc(sizeof(*pll), GFP_KERNEL);
        if (!pll)
                return ERR_PTR(-ENOMEM);

        init.name = name;
        init.flags = pll_clk->flags;
        init.parent_names = &parent_name;
        init.num_parents = 1;

        switch (pll_clk->type) {
        case PLL_1416X:
                if (!pll_clk->rate_table)
                        init.ops = &clk_pll1416x_min_ops;
                else
                        init.ops = &clk_pll1416x_ops;
                break;
        case PLL_1443X:
                init.ops = &clk_pll1443x_ops;
                break;
        default:
                pr_err("%s: Unknown pll type for pll clk %s\n",
                       __func__, name);
                kfree(pll);
                return ERR_PTR(-EINVAL);
        }

        pll->base = base;
        pll->hw.init = &init;
        pll->type = pll_clk->type;
        pll->rate_table = pll_clk->rate_table;
        pll->rate_count = pll_clk->rate_count;

        val = readl_relaxed(pll->base + GNRL_CTL);
        val &= ~BYPASS_MASK;
        writel_relaxed(val, pll->base + GNRL_CTL);

        clk = clk_register(NULL, &pll->hw);
        if (IS_ERR(clk)) {
                pr_err("%s: failed to register pll %s %ld\n",
                        __func__, name, PTR_ERR(clk));
                kfree(pll);
        }

        return clk;
}