1 // SPDX-License-Identifier: GPL-2.0-only
3 * A devfreq driver for NVIDIA Tegra SoCs
5 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
6 * Copyright (C) 2014 Google, Inc
10 #include <linux/cpufreq.h>
11 #include <linux/devfreq.h>
12 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/module.h>
16 #include <linux/of_device.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_opp.h>
19 #include <linux/reset.h>
20 #include <linux/workqueue.h>
/* Global ACTMON registers, relative to the controller's MMIO base */
#define ACTMON_GLB_STATUS 0x0
#define ACTMON_GLB_PERIOD_CTRL 0x4

/* Per-device registers, relative to each device's register sub-block */
#define ACTMON_DEV_CTRL 0x0
#define ACTMON_DEV_CTRL_K_VAL_SHIFT 10
#define ACTMON_DEV_CTRL_ENB_PERIODIC BIT(18)
#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN BIT(20)
#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN BIT(21)
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT 23
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT 26
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN BIT(29)
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN BIT(30)
#define ACTMON_DEV_CTRL_ENB BIT(31)

/* Writing 0 to DEV_CTRL disables the device entirely */
#define ACTMON_DEV_CTRL_STOP 0x00000000

#define ACTMON_DEV_UPPER_WMARK 0x4
#define ACTMON_DEV_LOWER_WMARK 0x8
#define ACTMON_DEV_INIT_AVG 0xc
#define ACTMON_DEV_AVG_UPPER_WMARK 0x10
#define ACTMON_DEV_AVG_LOWER_WMARK 0x14
#define ACTMON_DEV_COUNT_WEIGHT 0x18
#define ACTMON_DEV_AVG_COUNT 0x20
#define ACTMON_DEV_INTR_STATUS 0x24

/* Write-1-to-clear: acks every interrupt status bit at once */
#define ACTMON_INTR_STATUS_CLEAR 0xffffffff

#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER BIT(31)
#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER BIT(30)

/* Consecutive watermark-breach windows, in sampling periods */
#define ACTMON_ABOVE_WMARK_WINDOW 1
#define ACTMON_BELOW_WMARK_WINDOW 3
/* Boost increment (kHz) applied on every consecutive upper breach */
#define ACTMON_BOOST_FREQ_STEP 16000

/*
 * Activity counter is incremented every 256 memory transactions, and each
 * transaction takes 4 EMC clocks for Tegra124; So the COUNT_WEIGHT is
 * NOTE(review): the tail of this comment was lost in extraction —
 * presumably "4 * 256 = 1024 (0x400)"; confirm against the original.
 */
#define ACTMON_COUNT_WEIGHT 0x400

/*
 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
 */
#define ACTMON_AVERAGE_WINDOW_LOG2 6
#define ACTMON_SAMPLING_PERIOD 12 /* ms */
#define ACTMON_DEFAULT_AVG_BAND 6 /* 1/10 of % */

/*
 * NOTE(review): the "#define KHZ" that the rest of this file relies on
 * appears to have been lost in extraction — confirm against the original.
 */
/* Effectively-unlimited kHz value that cannot overflow when scaled */
#define KHZ_MAX (ULONG_MAX / KHZ)

/* Assume that the bus is saturated if the utilization is 25% */
#define BUS_SATURATION_RATIO 25
81 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
84 * Coefficients and thresholds are percentages unless otherwise noted
86 struct tegra_devfreq_device_config {
90 /* Factors applied to boost_freq every consecutive watermark breach */
91 unsigned int boost_up_coeff;
92 unsigned int boost_down_coeff;
94 /* Define the watermark bounds when applied to the current avg */
95 unsigned int boost_up_threshold;
96 unsigned int boost_down_threshold;
99 * Threshold of activity (cycles) below which the CPU frequency isn't
100 * to be taken into account. This is to avoid increasing the EMC
101 * frequency when the CPU is very busy but not accessing the bus often.
103 u32 avg_dependency_threshold;
/*
 * Index of each ACTMON device within tegra_devfreq.devices[]. Must match
 * the order of actmon_device_configs[]: MCALL (all memory accesses) first,
 * then MCCPU (CPU-originated accesses), since dev->config is assigned as
 * actmon_device_configs + i in probe. NOTE(review): the enumerators were
 * missing from this excerpt and are restored from their uses
 * (devices[MCALL], devices[MCCPU]) — confirm against the original source.
 */
enum tegra_actmon_device {
	MCALL = 0,
	MCCPU,
};
/*
 * Per-device boost tuning. NOTE(review): this excerpt is missing each
 * entry's opening "{"/closing "},", the table's closing "};" and the
 * ".offset"/".irq_mask" initializers that the rest of the file relies on
 * — confirm against the original source.
 */
static const struct tegra_devfreq_device_config actmon_device_configs[] = {
	/* MCALL: All memory accesses (including from the CPUs) */
	.boost_up_coeff = 200,		/* boost doubles per upper breach */
	.boost_down_coeff = 50,		/* and halves per lower breach */
	.boost_up_threshold = 60,	/* % of the sampled bandwidth */
	.boost_down_threshold = 40,
	/* MCCPU: memory accesses from the CPUs */
	.boost_up_coeff = 800,
	.boost_down_coeff = 90,
	.boost_up_threshold = 27,
	.boost_down_threshold = 10,
	.avg_dependency_threshold = 50000,	/* cycles, see config struct */
/**
 * struct tegra_devfreq_device - state specific to an ACTMON device
 *
 * Frequencies are in kHz.
 */
struct tegra_devfreq_device {
	const struct tegra_devfreq_device_config *config;

	/*
	 * NOTE(review): the per-device MMIO base ("regs", used by
	 * device_readl/device_writel) and the "avg_count" member that the
	 * comment below describes appear to be missing from this excerpt —
	 * confirm against the original source.
	 */

	/* Average event count sampled in the last interrupt */

	/*
	 * Extra frequency to increase the target by due to consecutive
	 * watermark breaches.
	 */
	unsigned long boost_freq;

	/* Optimal frequency calculated from the stats for this device */
	unsigned long target_freq;
/*
 * struct tegra_devfreq - driver-wide state.
 * NOTE(review): members referenced elsewhere in this file — the MMIO base
 * "regs", the ACTMON "clock" and the "irq" number — appear to be missing
 * from this excerpt, as is the closing "};"; confirm against the original.
 */
struct tegra_devfreq {
	struct devfreq *devfreq;	/* set by the governor before start */

	struct reset_control *reset;	/* "actmon" reset line */

	struct clk *emc_clock;		/* external memory clock */
	unsigned long max_freq;		/* highest roundable EMC rate, kHz */
	unsigned long cur_freq;		/* cached current EMC rate, kHz */
	struct notifier_block clk_rate_change_nb;	/* EMC rate updates */

	struct delayed_work cpufreq_update_work;	/* deferred devfreq update */
	struct notifier_block cpu_rate_change_nb;	/* CPUFreq transitions */

	struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
/* Maps a CPU frequency (kHz) to the EMC frequency (kHz) it requires */
struct tegra_actmon_emc_ratio {
	unsigned long cpu_freq;
	unsigned long emc_freq;
/*
 * CPU-to-EMC ratio table; the lookup in actmon_cpu_to_emc_rate() returns
 * on the first entry whose cpu_freq is <= the current CPU rate, so entries
 * are presumably ordered by descending cpu_freq. NOTE(review): only the
 * first row survived extraction — the remaining rows and the closing "};"
 * are missing; confirm against the original source.
 */
static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
	{ 1400000, KHZ_MAX },
190 static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
192 return readl_relaxed(tegra->regs + offset);
195 static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
197 writel_relaxed(val, tegra->regs + offset);
200 static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
202 return readl_relaxed(dev->regs + offset);
205 static void device_writel(struct tegra_devfreq_device *dev, u32 val,
208 writel_relaxed(val, dev->regs + offset);
/*
 * do_percent() - scale @val by @pct percent, rounding down.
 *
 * The naive "val * pct / 100" can overflow an unsigned long on 32-bit:
 * @val holds kHz-scale cycle counts and @pct goes up to 800 (see
 * actmon_device_configs[].boost_up_coeff). Splitting the computation as
 * (val / 100) * pct + (val % 100) * pct / 100 is arithmetically identical
 * for integers and gains a factor-100 of headroom without requiring a
 * 64-bit division.
 */
static unsigned long do_percent(unsigned long val, unsigned int pct)
{
	return val / 100 * pct + val % 100 * pct / 100;
}
216 static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
217 struct tegra_devfreq_device *dev)
219 u32 avg = dev->avg_count;
220 u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
221 u32 band = avg_band_freq * ACTMON_SAMPLING_PERIOD;
223 device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
225 avg = max(dev->avg_count, band);
226 device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
229 static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
230 struct tegra_devfreq_device *dev)
232 u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
234 device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
235 ACTMON_DEV_UPPER_WMARK);
237 device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
238 ACTMON_DEV_LOWER_WMARK);
/*
 * Process one device's watermark interrupt: sample the fresh average
 * activity count, re-centre the average watermarks around it, then grow
 * (consecutive-upper breach) or shrink (consecutive-lower breach) the
 * boost frequency, and finally ack all pending status bits.
 *
 * NOTE(review): several structural lines were lost in extraction ("else"
 * branches, the "boost_freq = 0" reset under the half-step check, closing
 * braces) — confirm against the original source before relying on this
 * text verbatim.
 */
static void actmon_isr_device(struct tegra_devfreq *tegra,
			      struct tegra_devfreq_device *dev)
	u32 intr_status, dev_ctrl;

	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
	tegra_devfreq_update_avg_wmark(tegra, dev);

	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);

	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
		/*
		 * new_boost = min(old_boost * up_coef + step, max_freq)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_up_coeff);
		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;

		/* re-arm the consecutive-lower detector */
		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;

		if (dev->boost_freq >= tegra->max_freq)
			dev->boost_freq = tegra->max_freq;
		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
		/*
		 * new_boost = old_boost * down_coef
		 * or 0 if (old_boost * down_coef < step / 2)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_down_coeff);

		/* re-arm the consecutive-upper detector */
		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;

		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1))
		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;

	/* MCCPU only: gate the lower-watermark IRQ on measured CPU activity */
	if (dev->config->avg_dependency_threshold) {
		if (dev->avg_count >= dev->config->avg_dependency_threshold)
			dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
		else if (dev->boost_freq == 0)
			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;

	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);

	/* write-1-to-clear every pending status bit */
	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
294 static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
295 unsigned long cpu_freq)
298 const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
300 for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
301 if (cpu_freq >= ratio->cpu_freq) {
302 if (ratio->emc_freq >= tegra->max_freq)
303 return tegra->max_freq;
305 return ratio->emc_freq;
312 static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
313 struct tegra_devfreq_device *dev)
315 unsigned int avg_sustain_coef;
316 unsigned long target_freq;
318 target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
319 avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
320 target_freq = do_percent(target_freq, avg_sustain_coef);
321 target_freq += dev->boost_freq;
326 static void actmon_update_target(struct tegra_devfreq *tegra,
327 struct tegra_devfreq_device *dev)
329 unsigned long cpu_freq = 0;
330 unsigned long static_cpu_emc_freq = 0;
332 if (dev->config->avg_dependency_threshold) {
333 cpu_freq = cpufreq_quick_get(0);
334 static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
337 dev->target_freq = actmon_device_target_freq(tegra, dev);
339 if (dev->avg_count >= dev->config->avg_dependency_threshold)
340 dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
/*
 * Threaded IRQ handler: for every device whose bit is set in the global
 * status register, process its watermark event, then ask devfreq to
 * re-evaluate the target frequency.
 *
 * NOTE(review): this excerpt lost the local declarations of the loop
 * index and of "val", the "handled = true;" assignment inside the loop
 * (something must set it — it is initialized false and returned below)
 * and, presumably, an "if (handled)" guard before update_devfreq() —
 * confirm against the original source.
 */
static irqreturn_t actmon_thread_isr(int irq, void *data)
	struct tegra_devfreq *tegra = data;
	bool handled = false;

	/* the devfreq lock serializes us against the governor callbacks */
	mutex_lock(&tegra->devfreq->lock);

	val = actmon_readl(tegra, ACTMON_GLB_STATUS);
	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		if (val & tegra->devices[i].config->irq_mask) {
			actmon_isr_device(tegra, tegra->devices + i);

	update_devfreq(tegra->devfreq);

	mutex_unlock(&tegra->devfreq->lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
/*
 * EMC clock-rate notifier: on POST_RATE_CHANGE, cache the new rate in kHz
 * and rewrite each device's raw-count watermarks, which are derived from
 * the current EMC frequency.
 *
 * NOTE(review): the early return after the action check, the declaration
 * of the loop index and the final "return NOTIFY_OK;" were lost in
 * extraction — confirm against the original source.
 */
static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
				      unsigned long action, void *ptr)
	struct clk_notifier_data *data = ptr;
	struct tegra_devfreq *tegra;
	struct tegra_devfreq_device *dev;

	/* only react once the rate change has actually taken effect */
	if (action != POST_RATE_CHANGE)

	tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);

	tegra->cur_freq = data->new_rate / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		dev = &tegra->devices[i];

		tegra_devfreq_update_wmark(tegra, dev);
392 static void tegra_actmon_delayed_update(struct work_struct *work)
394 struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
395 cpufreq_update_work.work);
397 mutex_lock(&tegra->devfreq->lock);
398 update_devfreq(tegra->devfreq);
399 mutex_unlock(&tegra->devfreq->lock);
403 tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
404 unsigned int cpu_freq)
406 unsigned long static_cpu_emc_freq, dev_freq;
408 /* check whether CPU's freq is taken into account at all */
409 if (tegra->devices[MCCPU].avg_count <
410 tegra->devices[MCCPU].config->avg_dependency_threshold)
413 static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
414 dev_freq = actmon_device_target_freq(tegra, &tegra->devices[MCCPU]);
416 if (dev_freq >= static_cpu_emc_freq)
419 return static_cpu_emc_freq;
/*
 * CPUFreq transition notifier: after a CPU frequency change, estimate
 * whether the CPU's EMC-bandwidth contribution changed and, if it may
 * have, schedule a deferred devfreq update.
 *
 * NOTE(review): the early "return NOTIFY_OK" after the action check, the
 * old-vs-new comparison that the in-block comment describes, the tails of
 * both comments, closing braces and the final return were lost in
 * extraction — confirm against the original source.
 */
static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
				      unsigned long action, void *ptr)
	struct cpufreq_freqs *freqs = ptr;
	struct tegra_devfreq *tegra;
	unsigned long old, new, delay;

	if (action != CPUFREQ_POSTCHANGE)

	tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);

	/*
	 * Quickly check whether CPU frequency should be taken into account
	 * at all, without blocking CPUFreq's core.
	 */
	if (mutex_trylock(&tegra->devfreq->lock)) {
		old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
		new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
		mutex_unlock(&tegra->devfreq->lock);

		/*
		 * If CPU's frequency shouldn't be taken into account at
		 * the moment, then there is no need to update the devfreq's
		 * state because ISR will re-check CPU's frequency on the
		 */

	/*
	 * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
	 * to allow asynchronous notifications. This means we can't block
	 * here for too long, otherwise CPUFreq's core will complain with a
	 */
	delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
	schedule_delayed_work(&tegra->cpufreq_update_work, delay);
/*
 * Program one ACTMON device for a governor (re)start: seed the hardware
 * average with an estimate derived from the current EMC rate, set all
 * watermarks and the count weight, clear stale interrupt state and enable
 * the device with periodic sampling, the averaging window and the
 * consecutive-breach windows.
 *
 * NOTE(review): the "u32 val = 0;" declaration and the boost-frequency
 * reset belonging to the "reset boosting" comment appear to be missing
 * from this excerpt — confirm against the original source.
 */
static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
					  struct tegra_devfreq_device *dev)
	/* reset boosting on governor's restart */

	dev->target_freq = tegra->cur_freq;

	/* seed: "cur_freq kHz of activity for one full sampling period" */
	dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);

	tegra_devfreq_update_avg_wmark(tegra, dev);
	tegra_devfreq_update_wmark(tegra, dev);

	device_writel(dev, ACTMON_COUNT_WEIGHT, ACTMON_DEV_COUNT_WEIGHT);
	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);

	/* build the control word: periodic sampling, IRQ sources, enable */
	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
	       << ACTMON_DEV_CTRL_K_VAL_SHIFT;
	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
	       << ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
	       << ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
	val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
	val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_ENB;

	device_writel(dev, val, ACTMON_DEV_CTRL);
499 static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
501 struct tegra_devfreq_device *dev = tegra->devices;
504 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
505 device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
506 device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
507 ACTMON_DEV_INTR_STATUS);
/*
 * Arm the monitor: program the global sampling period, register for EMC
 * rate changes, configure and enable every device, hook the CPUFreq
 * transition notifier and finally unmask the IRQ.
 *
 * NOTE(review): the err/index declarations, the "if (err...)" guards, the
 * success "return 0;" and the error-unwind labels were lost in extraction
 * — the last two statements below are the tail of that unwind path.
 * Confirm against the original source.
 */
static int tegra_actmon_start(struct tegra_devfreq *tegra)
	/* the period register is programmed as (period - 1) */
	actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
		      ACTMON_GLB_PERIOD_CTRL);

	/*
	 * CLK notifications are needed in order to reconfigure the upper
	 * consecutive watermark in accordance to the actual clock rate
	 * to avoid unnecessary upper interrupts.
	 */
	err = clk_notifier_register(tegra->emc_clock,
				    &tegra->clk_rate_change_nb);
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier\n");

	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
		tegra_actmon_configure_device(tegra, &tegra->devices[i]);

	/*
	 * We are estimating CPU's memory bandwidth requirement based on
	 * amount of memory accesses and system's load, judging by CPU's
	 * frequency. We also don't want to receive events about CPU's
	 * frequency transaction when governor is stopped, hence notifier
	 * is registered dynamically.
	 */
	err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
					CPUFREQ_TRANSITION_NOTIFIER);
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier: %d\n", err);

	enable_irq(tegra->irq);

	/* error-unwind tail (labels lost in extraction) */
	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
/*
 * Tear the monitor down in reverse order of tegra_actmon_start(): mask
 * the IRQ first so no handler runs while the devices are being stopped,
 * drop the CPUFreq notifier, flush any pending deferred update, stop the
 * ACTMON devices, and finally remove the EMC clock-rate notifier.
 */
static void tegra_actmon_stop(struct tegra_devfreq *tegra)
	disable_irq(tegra->irq);

	cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
				    CPUFREQ_TRANSITION_NOTIFIER);

	cancel_delayed_work_sync(&tegra->cpufreq_update_work);

	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
/*
 * devfreq target callback: translate the requested frequency to a valid
 * OPP, place a floor request on the EMC clock and nudge it with
 * clk_set_rate(0) — presumably letting the EMC driver settle at the
 * lowest rate that honours all floor requests (TODO confirm). On failure
 * the previous floor is restored.
 *
 * NOTE(review): the "u32 flags)" parameter continuation, the rate/err
 * declarations, the IS_ERR(opp)/error checks, dev_pm_opp_put() and the
 * success/error returns plus the "restore_min_rate" label were lost in
 * extraction — confirm against the original source.
 */
static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
	struct devfreq *devfreq = tegra->devfreq;
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);

	rate = dev_pm_opp_get_freq(opp);

	/* OPPs were added in kHz (see probe), hence the * KHZ here */
	err = clk_set_min_rate(tegra->emc_clock, rate * KHZ);

	err = clk_set_rate(tegra->emc_clock, 0);
		goto restore_min_rate;

	/* restore_min_rate: (label lost in extraction) */
	clk_set_min_rate(tegra->emc_clock, devfreq->previous_freq);
611 static int tegra_devfreq_get_dev_status(struct device *dev,
612 struct devfreq_dev_status *stat)
614 struct tegra_devfreq *tegra = dev_get_drvdata(dev);
615 struct tegra_devfreq_device *actmon_dev;
616 unsigned long cur_freq;
618 cur_freq = READ_ONCE(tegra->cur_freq);
620 /* To be used by the tegra governor */
621 stat->private_data = tegra;
623 /* The below are to be used by the other governors */
624 stat->current_frequency = cur_freq;
626 actmon_dev = &tegra->devices[MCALL];
628 /* Number of cycles spent on memory access */
629 stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
631 /* The bus can be considered to be saturated way before 100% */
632 stat->busy_time *= 100 / BUS_SATURATION_RATIO;
634 /* Number of cycles in a sampling period */
635 stat->total_time = ACTMON_SAMPLING_PERIOD * cur_freq;
637 stat->busy_time = min(stat->busy_time, stat->total_time);
/*
 * devfreq profile shared by all governors driving this device.
 * NOTE(review): additional initializers (e.g. ".polling_ms") and the
 * closing "};" appear to have been lost in extraction — confirm against
 * the original source.
 */
static struct devfreq_dev_profile tegra_devfreq_profile = {
	.target = tegra_devfreq_target,
	.get_dev_status = tegra_devfreq_get_dev_status,
/*
 * Governor callback: refresh the device statistics, recompute every
 * ACTMON device's optimal frequency and report the maximum of them.
 *
 * NOTE(review): the "unsigned long *freq" parameter continuation, the
 * err/index declarations, the error check after devfreq_update_stats()
 * and the final "*freq = target_freq; return 0;" were lost in extraction
 * — confirm against the original source.
 */
static int tegra_governor_get_target(struct devfreq *devfreq,
	struct devfreq_dev_status *stat;
	struct tegra_devfreq *tegra;
	struct tegra_devfreq_device *dev;
	unsigned long target_freq = 0;

	err = devfreq_update_stats(devfreq);

	stat = &devfreq->last_status;

	tegra = stat->private_data;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		dev = &tegra->devices[i];

		actmon_update_target(tegra, dev);

		target_freq = max(target_freq, dev->target_freq);
/*
 * Governor event dispatcher: starts and stops the ACTMON hardware in
 * lock-step with the devfreq monitor.
 *
 * NOTE(review): the "int ret = 0;" declaration, the "switch (event) {"
 * line, the "break;" statements and the final "return ret;" were lost in
 * extraction — confirm against the original source.
 */
static int tegra_governor_event_handler(struct devfreq *devfreq,
					unsigned int event, void *data)
	struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);

	/*
	 * Couple devfreq-device with the governor early because it is
	 * needed at the moment of governor's start (used by ISR).
	 */
	tegra->devfreq = devfreq;

	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		ret = tegra_actmon_start(tegra);

	case DEVFREQ_GOV_STOP:
		tegra_actmon_stop(tegra);
		devfreq_monitor_stop(devfreq);

	case DEVFREQ_GOV_SUSPEND:
		tegra_actmon_stop(tegra);
		devfreq_monitor_suspend(devfreq);

	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		ret = tegra_actmon_start(tegra);
/*
 * Custom "tegra_actmon" governor: picks the maximum of the per-device
 * target frequencies and manages the ACTMON hardware lifecycle.
 * NOTE(review): further initializers (e.g. ".immutable") and the closing
 * "};" may have been lost in extraction — confirm against the original.
 */
static struct devfreq_governor tegra_devfreq_governor = {
	.name = "tegra_actmon",
	.get_target_freq = tegra_governor_get_target,
	.event_handler = tegra_governor_event_handler,
/*
 * Probe: map the registers, acquire reset/clocks/IRQ, build the OPP table
 * from the rates the EMC clock can round to, then register the
 * tegra_actmon governor and the devfreq device.
 *
 * NOTE(review): this excerpt is missing many short lines — declarations
 * of err/i/rate, several "if (err...)" / "if (rate < 0)" guards, closing
 * braces and the goto labels of the error-unwind tail — confirm against
 * the original source.
 */
static int tegra_devfreq_probe(struct platform_device *pdev)
	struct tegra_devfreq_device *dev;
	struct tegra_devfreq *tegra;
	struct devfreq *devfreq;

	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);

	tegra->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tegra->regs))
		return PTR_ERR(tegra->regs);

	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->reset)) {
		dev_err(&pdev->dev, "Failed to get reset\n");
		return PTR_ERR(tegra->reset);

	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->clock)) {
		dev_err(&pdev->dev, "Failed to get actmon clock\n");
		return PTR_ERR(tegra->clock);

	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
	if (IS_ERR(tegra->emc_clock)) {
		dev_err(&pdev->dev, "Failed to get emc clock\n");
		return PTR_ERR(tegra->emc_clock);

	err = platform_get_irq(pdev, 0);
		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", err);

	/* keep the IRQ masked until the governor actually starts */
	irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);

	err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
					actmon_thread_isr, IRQF_ONESHOT,
					"tegra-devfreq", tegra);
		dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);

	/* put the unit into a known state before enabling its clock */
	reset_control_assert(tegra->reset);

	err = clk_prepare_enable(tegra->clock);
		"Failed to prepare and enable ACTMON clock\n");

	reset_control_deassert(tegra->reset);

	/* the highest roundable EMC rate bounds the devfreq range */
	rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
		dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);

	tegra->max_freq = rate / KHZ;

	for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
		dev = tegra->devices + i;
		dev->config = actmon_device_configs + i;
		dev->regs = tegra->regs + dev->config->offset;

	/* enumerate each roundable EMC rate and expose it as an OPP (kHz) */
	for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
		rate = clk_round_rate(tegra->emc_clock, rate);
			"Failed to round clock rate: %ld\n", rate);

		err = dev_pm_opp_add(&pdev->dev, rate / KHZ, 0);
			dev_err(&pdev->dev, "Failed to add OPP: %d\n", err);

	platform_set_drvdata(pdev, tegra);

	tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
	tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;

	INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
			  tegra_actmon_delayed_update);

	err = devfreq_add_governor(&tegra_devfreq_governor);
		dev_err(&pdev->dev, "Failed to add governor: %d\n", err);

	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
	tegra_devfreq_profile.initial_freq /= KHZ;

	devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
				     "tegra_actmon", NULL);
	if (IS_ERR(devfreq)) {
		err = PTR_ERR(devfreq);
		goto remove_governor;

	/* error-unwind tail (labels lost in extraction) */
	devfreq_remove_governor(&tegra_devfreq_governor);

	dev_pm_opp_remove_all_dynamic(&pdev->dev);

	reset_control_reset(tegra->reset);
	clk_disable_unprepare(tegra->clock);
/*
 * Driver removal: tear down in reverse order of probe. devm_* handles the
 * MMIO mapping, IRQ and clock references; only the dynamically created
 * objects are released here. NOTE(review): the trailing "return 0;" was
 * lost in extraction — confirm against the original source.
 */
static int tegra_devfreq_remove(struct platform_device *pdev)
	struct tegra_devfreq *tegra = platform_get_drvdata(pdev);

	devfreq_remove_device(tegra->devfreq);
	devfreq_remove_governor(&tegra_devfreq_governor);

	dev_pm_opp_remove_all_dynamic(&pdev->dev);

	reset_control_reset(tegra->reset);
	clk_disable_unprepare(tegra->clock);
/*
 * Supported SoCs. NOTE(review): the "{ }," table sentinel and the closing
 * "};" appear to have been lost in extraction — confirm against the
 * original source.
 */
static const struct of_device_id tegra_devfreq_of_match[] = {
	{ .compatible = "nvidia,tegra30-actmon" },
	{ .compatible = "nvidia,tegra124-actmon" },

MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
/*
 * Platform-driver glue and module metadata.
 * NOTE(review): the ".driver = {" wrapper around the name/of_match_table
 * initializers and the closing braces were lost in extraction — confirm
 * against the original source.
 */
static struct platform_driver tegra_devfreq_driver = {
	.probe = tegra_devfreq_probe,
	.remove = tegra_devfreq_remove,
	.name = "tegra-devfreq",
	.of_match_table = tegra_devfreq_of_match,

module_platform_driver(tegra_devfreq_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Tegra devfreq driver");
MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");