]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/devfreq/devfreq.c
Merge tag 'edac_for_5.6' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras
[linux.git] / drivers / devfreq / devfreq.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
4  *          for Non-CPU Devices.
5  *
6  * Copyright (C) 2011 Samsung Electronics
7  *      MyungJoo Ham <myungjoo.ham@samsung.com>
8  */
9
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include "governor.h"
29
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/devfreq.h>
32
33 #define HZ_PER_KHZ      1000
34
35 static struct class *devfreq_class;
36
37 /*
38  * devfreq core provides delayed work based load monitoring helper
39  * functions. Governors can use these or can implement their own
40  * monitoring mechanism.
41  */
42 static struct workqueue_struct *devfreq_wq;
43
44 /* The list of all device-devfreq governors */
45 static LIST_HEAD(devfreq_governor_list);
46 /* The list of all device-devfreq */
47 static LIST_HEAD(devfreq_list);
48 static DEFINE_MUTEX(devfreq_list_lock);
49
50 /**
51  * find_device_devfreq() - find devfreq struct using device pointer
52  * @dev:        device pointer used to lookup device devfreq.
53  *
54  * Search the list of device devfreqs and return the matched device's
55  * devfreq info. devfreq_list_lock should be held by the caller.
56  */
57 static struct devfreq *find_device_devfreq(struct device *dev)
58 {
59         struct devfreq *tmp_devfreq;
60
61         if (IS_ERR_OR_NULL(dev)) {
62                 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
63                 return ERR_PTR(-EINVAL);
64         }
65         WARN(!mutex_is_locked(&devfreq_list_lock),
66              "devfreq_list_lock must be locked.");
67
68         list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
69                 if (tmp_devfreq->dev.parent == dev)
70                         return tmp_devfreq;
71         }
72
73         return ERR_PTR(-ENODEV);
74 }
75
76 static unsigned long find_available_min_freq(struct devfreq *devfreq)
77 {
78         struct dev_pm_opp *opp;
79         unsigned long min_freq = 0;
80
81         opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
82         if (IS_ERR(opp))
83                 min_freq = 0;
84         else
85                 dev_pm_opp_put(opp);
86
87         return min_freq;
88 }
89
90 static unsigned long find_available_max_freq(struct devfreq *devfreq)
91 {
92         struct dev_pm_opp *opp;
93         unsigned long max_freq = ULONG_MAX;
94
95         opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
96         if (IS_ERR(opp))
97                 max_freq = 0;
98         else
99                 dev_pm_opp_put(opp);
100
101         return max_freq;
102 }
103
104 /**
105  * get_freq_range() - Get the current freq range
106  * @devfreq:    the devfreq instance
107  * @min_freq:   the min frequency
108  * @max_freq:   the max frequency
109  *
110  * This takes into consideration all constraints.
111  */
static void get_freq_range(struct devfreq *devfreq,
			   unsigned long *min_freq,
			   unsigned long *max_freq)
{
	unsigned long *freq_table = devfreq->profile->freq_table;
	s32 qos_min_freq, qos_max_freq;

	lockdep_assert_held(&devfreq->lock);

	/*
	 * Initialize minimum/maximum frequency from freq table.
	 * The devfreq drivers can initialize this in either ascending or
	 * descending order and devfreq core supports both.
	 */
	if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
		*min_freq = freq_table[0];
		*max_freq = freq_table[devfreq->profile->max_state - 1];
	} else {
		*min_freq = freq_table[devfreq->profile->max_state - 1];
		*max_freq = freq_table[0];
	}

	/*
	 * Apply constraints from PM QoS. The QoS values are expressed in
	 * kHz while the freq_table holds Hz, hence the HZ_PER_KHZ scaling.
	 */
	qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
					     DEV_PM_QOS_MIN_FREQUENCY);
	qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
					     DEV_PM_QOS_MAX_FREQUENCY);
	/* The min-side default is 0, so an unconditional max() is harmless. */
	*min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
	/*
	 * The max-side default must be tested explicitly: scaling the "no
	 * request" sentinel by HZ_PER_KHZ would produce a bogus cap.
	 */
	if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
		*max_freq = min(*max_freq,
				(unsigned long)HZ_PER_KHZ * qos_max_freq);

	/* Apply constraints from OPP interface */
	*min_freq = max(*min_freq, devfreq->scaling_min_freq);
	*max_freq = min(*max_freq, devfreq->scaling_max_freq);

	/* If the constraints conflict, the upper bound wins. */
	if (*min_freq > *max_freq)
		*min_freq = *max_freq;
}
151
152 /**
153  * devfreq_get_freq_level() - Lookup freq_table for the frequency
154  * @devfreq:    the devfreq instance
155  * @freq:       the target frequency
156  */
157 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
158 {
159         int lev;
160
161         for (lev = 0; lev < devfreq->profile->max_state; lev++)
162                 if (freq == devfreq->profile->freq_table[lev])
163                         return lev;
164
165         return -EINVAL;
166 }
167
static int set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0)
		return -EINVAL;

	profile->max_state = count;
	profile->freq_table = devm_kcalloc(devfreq->dev.parent,
					profile->max_state,
					sizeof(*profile->freq_table),
					GFP_KERNEL);
	if (!profile->freq_table) {
		/* Keep max_state consistent with the (missing) table. */
		profile->max_state = 0;
		return -ENOMEM;
	}

	/*
	 * Walk the OPPs in ascending order: the freq++ in the loop header
	 * bumps the search key just past the OPP found on the previous
	 * iteration, so find_freq_ceil() returns the next higher one.
	 */
	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			/*
			 * Fewer OPPs than counted above — presumably the
			 * table changed in between; undo and report.
			 */
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			return PTR_ERR(opp);
		}
		dev_pm_opp_put(opp);
		profile->freq_table[i] = freq;
	}

	return 0;
}
203
204 /**
205  * devfreq_update_status() - Update statistics of devfreq behavior
206  * @devfreq:    the devfreq instance
207  * @freq:       the update target frequency
208  */
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	unsigned long cur_time;

	lockdep_assert_held(&devfreq->lock);
	cur_time = jiffies;

	/* Immediately exit if previous_freq is not initialized yet. */
	if (!devfreq->previous_freq)
		goto out;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		/* previous_freq is not in freq_table; propagate -EINVAL. */
		ret = prev_lev;
		goto out;
	}

	/* Charge the time elapsed since the last update to the old level. */
	devfreq->time_in_state[prev_lev] +=
			 cur_time - devfreq->last_stat_updated;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	/* Record the transition in the max_state x max_state matrix. */
	if (lev != prev_lev) {
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}

out:
	/*
	 * Advance the timestamp even on the error paths so the interval
	 * examined here is not accounted a second time by the next call.
	 */
	devfreq->last_stat_updated = cur_time;
	return ret;
}
EXPORT_SYMBOL(devfreq_update_status);
247
248 /**
249  * find_devfreq_governor() - find devfreq governor from name
250  * @name:       name of the governor
251  *
252  * Search the list of devfreq governors and return the matched
253  * governor's pointer. devfreq_list_lock should be held by the caller.
254  */
255 static struct devfreq_governor *find_devfreq_governor(const char *name)
256 {
257         struct devfreq_governor *tmp_governor;
258
259         if (IS_ERR_OR_NULL(name)) {
260                 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
261                 return ERR_PTR(-EINVAL);
262         }
263         WARN(!mutex_is_locked(&devfreq_list_lock),
264              "devfreq_list_lock must be locked.");
265
266         list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
267                 if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
268                         return tmp_governor;
269         }
270
271         return ERR_PTR(-ENODEV);
272 }
273
274 /**
275  * try_then_request_governor() - Try to find the governor and request the
276  *                               module if is not found.
277  * @name:       name of the governor
278  *
279  * Search the list of devfreq governors and request the module and try again
280  * if is not found. This can happen when both drivers (the governor driver
281  * and the driver that call devfreq_add_device) are built as modules.
282  * devfreq_list_lock should be held by the caller. Returns the matched
283  * governor's pointer or an error pointer.
284  */
static struct devfreq_governor *try_then_request_governor(const char *name)
{
	struct devfreq_governor *governor;
	int err = 0;

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	governor = find_devfreq_governor(name);
	if (IS_ERR(governor)) {
		/*
		 * Drop the list lock across request_module(): module load
		 * blocks, and the governor being loaded registers itself —
		 * presumably taking devfreq_list_lock — so holding it here
		 * could deadlock.
		 */
		mutex_unlock(&devfreq_list_lock);

		/* The simple_ondemand governor lives in governor_simpleondemand.ko. */
		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
			     DEVFREQ_NAME_LEN))
			err = request_module("governor_%s", "simpleondemand");
		else
			err = request_module("governor_%s", name);
		/* Restore previous state before return */
		mutex_lock(&devfreq_list_lock);
		if (err)
			return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);

		/* Retry now that the module had a chance to register. */
		governor = find_devfreq_governor(name);
	}

	return governor;
}
316
317 static int devfreq_notify_transition(struct devfreq *devfreq,
318                 struct devfreq_freqs *freqs, unsigned int state)
319 {
320         if (!devfreq)
321                 return -EINVAL;
322
323         switch (state) {
324         case DEVFREQ_PRECHANGE:
325                 srcu_notifier_call_chain(&devfreq->transition_notifier_list,
326                                 DEVFREQ_PRECHANGE, freqs);
327                 break;
328
329         case DEVFREQ_POSTCHANGE:
330                 srcu_notifier_call_chain(&devfreq->transition_notifier_list,
331                                 DEVFREQ_POSTCHANGE, freqs);
332                 break;
333         default:
334                 return -EINVAL;
335         }
336
337         return 0;
338 }
339
static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
			      u32 flags)
{
	struct devfreq_freqs freqs;
	unsigned long cur_freq;
	int err = 0;

	/* Prefer the driver-reported frequency; fall back to our cache. */
	if (devfreq->profile->get_cur_freq)
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	else
		cur_freq = devfreq->previous_freq;

	freqs.old = cur_freq;
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
	if (err) {
		/*
		 * The switch failed: report POSTCHANGE with the old
		 * frequency so listeners see that nothing changed.
		 */
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	/* ->target() gets &new_freq, so it may have adjusted the value. */
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, new_freq))
		dev_err(&devfreq->dev,
			"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = new_freq;

	/* Remember where to come back to after a system resume. */
	if (devfreq->suspend_freq)
		devfreq->resume_freq = cur_freq;

	return err;
}
377
378 /* Load monitoring helper functions for governors use */
379
380 /**
381  * update_devfreq() - Reevaluate the device and configure frequency.
382  * @devfreq:    the devfreq instance.
383  *
384  * Note: Lock devfreq->lock before calling update_devfreq
385  *       This function is exported for governors.
386  */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq, min_freq, max_freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;
	get_freq_range(devfreq, &min_freq, &max_freq);

	/*
	 * Clamp the governor's choice into [min_freq, max_freq]. The flag
	 * tells ->target() which way to round when the clamped value does
	 * not match a supported frequency exactly.
	 */
	if (freq < min_freq) {
		freq = min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (freq > max_freq) {
		freq = max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	return devfreq_set_target(devfreq, freq, flags);

}
EXPORT_SYMBOL(update_devfreq);
420
421 /**
422  * devfreq_monitor() - Periodically poll devfreq objects.
423  * @work:       the work struct used to run devfreq_monitor periodically.
424  *
425  */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	/*
	 * Re-arm ourselves; polling stops only when the work is cancelled
	 * (see devfreq_monitor_stop()/devfreq_monitor_suspend()).
	 */
	queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);

	trace_devfreq_monitor(devfreq);
}
443
444 /**
445  * devfreq_monitor_start() - Start load monitoring of devfreq instance
446  * @devfreq:    the devfreq instance.
447  *
448  * Helper function for starting devfreq device load monitoring. By
449  * default delayed work based monitoring is supported. Function
450  * to be called from governor in response to DEVFREQ_GOV_START
451  * event when device is added to devfreq framework.
452  */
453 void devfreq_monitor_start(struct devfreq *devfreq)
454 {
455         if (devfreq->governor->interrupt_driven)
456                 return;
457
458         INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
459         if (devfreq->profile->polling_ms)
460                 queue_delayed_work(devfreq_wq, &devfreq->work,
461                         msecs_to_jiffies(devfreq->profile->polling_ms));
462 }
463 EXPORT_SYMBOL(devfreq_monitor_start);
464
465 /**
466  * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
467  * @devfreq:    the devfreq instance.
468  *
469  * Helper function to stop devfreq device load monitoring. Function
470  * to be called from governor in response to DEVFREQ_GOV_STOP
471  * event when device is removed from devfreq framework.
472  */
473 void devfreq_monitor_stop(struct devfreq *devfreq)
474 {
475         if (devfreq->governor->interrupt_driven)
476                 return;
477
478         cancel_delayed_work_sync(&devfreq->work);
479 }
480 EXPORT_SYMBOL(devfreq_monitor_stop);
481
482 /**
483  * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
484  * @devfreq:    the devfreq instance.
485  *
486  * Helper function to suspend devfreq device load monitoring. Function
487  * to be called from governor in response to DEVFREQ_GOV_SUSPEND
488  * event or when polling interval is set to zero.
489  *
490  * Note: Though this function is same as devfreq_monitor_stop(),
491  * intentionally kept separate to provide hooks for collecting
492  * transition statistics.
493  */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	/* Already suspended: nothing to do. */
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	/* Close out the statistics for the current frequency first. */
	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);

	/* Interrupt-driven governors have no polling work to cancel. */
	if (devfreq->governor->interrupt_driven)
		return;

	/*
	 * Must run without devfreq->lock held: devfreq_monitor() takes
	 * that lock, so waiting for it here while holding the lock
	 * would deadlock.
	 */
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);
512
513 /**
514  * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
515  * @devfreq:    the devfreq instance.
516  *
517  * Helper function to resume devfreq device load monitoring. Function
518  * to be called from governor in response to DEVFREQ_GOV_RESUME
519  * event or when polling interval is set to non-zero.
520  */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	/* Not suspended: nothing to resume. */
	if (!devfreq->stop_polling)
		goto out;

	/* Interrupt-driven governors have no polling work to restart. */
	if (devfreq->governor->interrupt_driven)
		goto out_update;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));

out_update:
	/* Restart statistics accounting from "now". */
	devfreq->last_stat_updated = jiffies;
	devfreq->stop_polling = false;

	/* Re-sync the cached frequency; it may have changed while suspended. */
	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		devfreq->previous_freq = freq;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);
549
550 /**
551  * devfreq_interval_update() - Update device devfreq monitoring interval
552  * @devfreq:    the devfreq instance.
553  * @delay:      new polling interval to be set.
554  *
555  * Helper function to set new load monitoring polling interval. Function
556  * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
557  */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	/* Monitoring suspended: the new value takes effect on resume. */
	if (devfreq->stop_polling)
		goto out;

	/* Interrupt-driven governors have no polling work to adjust. */
	if (devfreq->governor->interrupt_driven)
		goto out;

	/*
	 * if new delay is zero, stop polling.
	 * The lock must be dropped before cancel_delayed_work_sync():
	 * devfreq_monitor() takes devfreq->lock, so waiting for it while
	 * holding the lock would deadlock.
	 */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/*
	 * if current delay is greater than new delay, restart polling
	 * so the shorter interval applies immediately instead of after
	 * the old (longer) timeout expires. Re-check stop_polling after
	 * re-acquiring the lock — it may have changed in between.
	 */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);
599
600 /**
601  * devfreq_notifier_call() - Notify that the device frequency requirements
602  *                           has been changed out of devfreq framework.
603  * @nb:         the notifier_block (supposed to be devfreq->nb)
604  * @type:       not used
605  * @devp:       not used
606  *
607  * Called by a notifier that uses devfreq->nb.
608  */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int err = -EINVAL;

	mutex_lock(&devfreq->lock);

	/* Re-read the OPP-imposed frequency limits; they may have changed. */
	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq)
		goto out;

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		/* No usable maximum found: fall back to "no upper limit". */
		devfreq->scaling_max_freq = ULONG_MAX;
		goto out;
	}

	err = update_devfreq(devfreq);

out:
	mutex_unlock(&devfreq->lock);
	if (err)
		dev_err(devfreq->dev.parent,
			"failed to update frequency from OPP notifier (%d)\n",
			err);

	/* Notifier chains expect NOTIFY_*; failures are only logged. */
	return NOTIFY_OK;
}
638
639 /**
640  * qos_notifier_call() - Common handler for QoS constraints.
641  * @devfreq:    the devfreq instance.
642  */
643 static int qos_notifier_call(struct devfreq *devfreq)
644 {
645         int err;
646
647         mutex_lock(&devfreq->lock);
648         err = update_devfreq(devfreq);
649         mutex_unlock(&devfreq->lock);
650         if (err)
651                 dev_err(devfreq->dev.parent,
652                         "failed to update frequency from PM QoS (%d)\n",
653                         err);
654
655         return NOTIFY_OK;
656 }
657
658 /**
659  * qos_min_notifier_call() - Callback for QoS min_freq changes.
660  * @nb:         Should be devfreq->nb_min
661  */
662 static int qos_min_notifier_call(struct notifier_block *nb,
663                                          unsigned long val, void *ptr)
664 {
665         return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
666 }
667
668 /**
669  * qos_max_notifier_call() - Callback for QoS max_freq changes.
670  * @nb:         Should be devfreq->nb_max
671  */
672 static int qos_max_notifier_call(struct notifier_block *nb,
673                                          unsigned long val, void *ptr)
674 {
675         return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
676 }
677
678 /**
679  * devfreq_dev_release() - Callback for struct device to release the device.
680  * @dev:        the devfreq device
681  *
682  * Remove devfreq from the list and release its resources.
683  */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);
	int err;

	/* Unlink first so no one can find this devfreq anymore. */
	mutex_lock(&devfreq_list_lock);
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	/*
	 * Tear down QoS notifiers and requests. -ENOENT means the notifier
	 * was never added (partially-initialized device) and is not an error.
	 */
	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
					 DEV_PM_QOS_MAX_FREQUENCY);
	if (err && err != -ENOENT)
		dev_warn(dev->parent,
			"Failed to remove max_freq notifier: %d\n", err);
	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
					 DEV_PM_QOS_MIN_FREQUENCY);
	if (err && err != -ENOENT)
		dev_warn(dev->parent,
			"Failed to remove min_freq notifier: %d\n", err);

	/* Only remove user QoS requests that were actually added. */
	if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
		if (err)
			dev_warn(dev->parent,
				"Failed to remove max_freq request: %d\n", err);
	}
	if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
		if (err)
			dev_warn(dev->parent,
				"Failed to remove min_freq request: %d\n", err);
	}

	/* Give the driver a chance to release its own resources. */
	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}
723
724 /**
725  * devfreq_add_device() - Add devfreq feature to the device
726  * @dev:        the device to add devfreq feature.
727  * @profile:    device-specific profile to run devfreq.
728  * @governor_name:      name of the policy to choose frequency.
729  * @data:       private data for the governor. The devfreq framework does not
730  *              touch this value.
731  */
732 struct devfreq *devfreq_add_device(struct device *dev,
733                                    struct devfreq_dev_profile *profile,
734                                    const char *governor_name,
735                                    void *data)
736 {
737         struct devfreq *devfreq;
738         struct devfreq_governor *governor;
739         static atomic_t devfreq_no = ATOMIC_INIT(-1);
740         int err = 0;
741
742         if (!dev || !profile || !governor_name) {
743                 dev_err(dev, "%s: Invalid parameters.\n", __func__);
744                 return ERR_PTR(-EINVAL);
745         }
746
747         mutex_lock(&devfreq_list_lock);
748         devfreq = find_device_devfreq(dev);
749         mutex_unlock(&devfreq_list_lock);
750         if (!IS_ERR(devfreq)) {
751                 dev_err(dev, "%s: devfreq device already exists!\n",
752                         __func__);
753                 err = -EINVAL;
754                 goto err_out;
755         }
756
757         devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
758         if (!devfreq) {
759                 err = -ENOMEM;
760                 goto err_out;
761         }
762
763         mutex_init(&devfreq->lock);
764         mutex_lock(&devfreq->lock);
765         devfreq->dev.parent = dev;
766         devfreq->dev.class = devfreq_class;
767         devfreq->dev.release = devfreq_dev_release;
768         INIT_LIST_HEAD(&devfreq->node);
769         devfreq->profile = profile;
770         strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
771         devfreq->previous_freq = profile->initial_freq;
772         devfreq->last_status.current_frequency = profile->initial_freq;
773         devfreq->data = data;
774         devfreq->nb.notifier_call = devfreq_notifier_call;
775
776         if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
777                 mutex_unlock(&devfreq->lock);
778                 err = set_freq_table(devfreq);
779                 if (err < 0)
780                         goto err_dev;
781                 mutex_lock(&devfreq->lock);
782         }
783
784         devfreq->scaling_min_freq = find_available_min_freq(devfreq);
785         if (!devfreq->scaling_min_freq) {
786                 mutex_unlock(&devfreq->lock);
787                 err = -EINVAL;
788                 goto err_dev;
789         }
790
791         devfreq->scaling_max_freq = find_available_max_freq(devfreq);
792         if (!devfreq->scaling_max_freq) {
793                 mutex_unlock(&devfreq->lock);
794                 err = -EINVAL;
795                 goto err_dev;
796         }
797
798         devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
799         atomic_set(&devfreq->suspend_count, 0);
800
801         dev_set_name(&devfreq->dev, "devfreq%d",
802                                 atomic_inc_return(&devfreq_no));
803         err = device_register(&devfreq->dev);
804         if (err) {
805                 mutex_unlock(&devfreq->lock);
806                 put_device(&devfreq->dev);
807                 goto err_out;
808         }
809
810         devfreq->trans_table = devm_kzalloc(&devfreq->dev,
811                         array3_size(sizeof(unsigned int),
812                                     devfreq->profile->max_state,
813                                     devfreq->profile->max_state),
814                         GFP_KERNEL);
815         if (!devfreq->trans_table) {
816                 mutex_unlock(&devfreq->lock);
817                 err = -ENOMEM;
818                 goto err_devfreq;
819         }
820
821         devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
822                         devfreq->profile->max_state,
823                         sizeof(unsigned long),
824                         GFP_KERNEL);
825         if (!devfreq->time_in_state) {
826                 mutex_unlock(&devfreq->lock);
827                 err = -ENOMEM;
828                 goto err_devfreq;
829         }
830
831         devfreq->last_stat_updated = jiffies;
832
833         srcu_init_notifier_head(&devfreq->transition_notifier_list);
834
835         mutex_unlock(&devfreq->lock);
836
837         err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
838                                      DEV_PM_QOS_MIN_FREQUENCY, 0);
839         if (err < 0)
840                 goto err_devfreq;
841         err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
842                                      DEV_PM_QOS_MAX_FREQUENCY,
843                                      PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
844         if (err < 0)
845                 goto err_devfreq;
846
847         devfreq->nb_min.notifier_call = qos_min_notifier_call;
848         err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
849                                       DEV_PM_QOS_MIN_FREQUENCY);
850         if (err)
851                 goto err_devfreq;
852
853         devfreq->nb_max.notifier_call = qos_max_notifier_call;
854         err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
855                                       DEV_PM_QOS_MAX_FREQUENCY);
856         if (err)
857                 goto err_devfreq;
858
859         mutex_lock(&devfreq_list_lock);
860
861         governor = try_then_request_governor(devfreq->governor_name);
862         if (IS_ERR(governor)) {
863                 dev_err(dev, "%s: Unable to find governor for the device\n",
864                         __func__);
865                 err = PTR_ERR(governor);
866                 goto err_init;
867         }
868
869         devfreq->governor = governor;
870         err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
871                                                 NULL);
872         if (err) {
873                 dev_err(dev, "%s: Unable to start governor for the device\n",
874                         __func__);
875                 goto err_init;
876         }
877
878         list_add(&devfreq->node, &devfreq_list);
879
880         mutex_unlock(&devfreq_list_lock);
881
882         return devfreq;
883
884 err_init:
885         mutex_unlock(&devfreq_list_lock);
886 err_devfreq:
887         devfreq_remove_device(devfreq);
888         devfreq = NULL;
889 err_dev:
890         kfree(devfreq);
891 err_out:
892         return ERR_PTR(err);
893 }
894 EXPORT_SYMBOL(devfreq_add_device);
895
896 /**
897  * devfreq_remove_device() - Remove devfreq feature from a device.
898  * @devfreq:    the devfreq instance to be removed
899  *
900  * The opposite of devfreq_add_device().
901  */
902 int devfreq_remove_device(struct devfreq *devfreq)
903 {
904         if (!devfreq)
905                 return -EINVAL;
906
907         if (devfreq->governor)
908                 devfreq->governor->event_handler(devfreq,
909                                                  DEVFREQ_GOV_STOP, NULL);
910         device_unregister(&devfreq->dev);
911
912         return 0;
913 }
914 EXPORT_SYMBOL(devfreq_remove_device);
915
/* devres match callback: true iff the managed record refers to @data. */
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **this = res;

	/* A managed record without a devfreq pointer is a programming bug. */
	if (WARN_ON(!this || !*this))
		return 0;

	return *this == data;
}
925
/* devres release callback: tear down the managed devfreq instance. */
static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	struct devfreq *devfreq = *(struct devfreq **)res;

	devfreq_remove_device(devfreq);
}
930
931 /**
932  * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
933  * @dev:        the device to add devfreq feature.
934  * @profile:    device-specific profile to run devfreq.
935  * @governor_name:      name of the policy to choose frequency.
936  * @data:       private data for the governor. The devfreq framework does not
937  *              touch this value.
938  *
939  * This function manages automatically the memory of devfreq device using device
940  * resource management and simplify the free operation for memory of devfreq
941  * device.
942  */
943 struct devfreq *devm_devfreq_add_device(struct device *dev,
944                                         struct devfreq_dev_profile *profile,
945                                         const char *governor_name,
946                                         void *data)
947 {
948         struct devfreq **ptr, *devfreq;
949
950         ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
951         if (!ptr)
952                 return ERR_PTR(-ENOMEM);
953
954         devfreq = devfreq_add_device(dev, profile, governor_name, data);
955         if (IS_ERR(devfreq)) {
956                 devres_free(ptr);
957                 return devfreq;
958         }
959
960         *ptr = devfreq;
961         devres_add(dev, ptr);
962
963         return devfreq;
964 }
965 EXPORT_SYMBOL(devm_devfreq_add_device);
966
967 #ifdef CONFIG_OF
968 /*
969  * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
970  * @dev - instance to the given device
971  * @index - index into list of devfreq
972  *
973  * return the instance of devfreq device
974  */
975 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
976 {
977         struct device_node *node;
978         struct devfreq *devfreq;
979
980         if (!dev)
981                 return ERR_PTR(-EINVAL);
982
983         if (!dev->of_node)
984                 return ERR_PTR(-EINVAL);
985
986         node = of_parse_phandle(dev->of_node, "devfreq", index);
987         if (!node)
988                 return ERR_PTR(-ENODEV);
989
990         mutex_lock(&devfreq_list_lock);
991         list_for_each_entry(devfreq, &devfreq_list, node) {
992                 if (devfreq->dev.parent
993                         && devfreq->dev.parent->of_node == node) {
994                         mutex_unlock(&devfreq_list_lock);
995                         of_node_put(node);
996                         return devfreq;
997                 }
998         }
999         mutex_unlock(&devfreq_list_lock);
1000         of_node_put(node);
1001
1002         return ERR_PTR(-EPROBE_DEFER);
1003 }
1004 #else
1005 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
1006 {
1007         return ERR_PTR(-ENODEV);
1008 }
1009 #endif /* CONFIG_OF */
1010 EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
1011
1012 /**
1013  * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
1014  * @dev:        the device from which to remove devfreq feature.
1015  * @devfreq:    the devfreq instance to be removed
1016  */
1017 void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
1018 {
1019         WARN_ON(devres_release(dev, devm_devfreq_dev_release,
1020                                devm_devfreq_dev_match, devfreq));
1021 }
1022 EXPORT_SYMBOL(devm_devfreq_remove_device);
1023
1024 /**
1025  * devfreq_suspend_device() - Suspend devfreq of a device.
1026  * @devfreq: the devfreq instance to be suspended
1027  *
1028  * This function is intended to be called by the pm callbacks
1029  * (e.g., runtime_suspend, suspend) of the device driver that
1030  * holds the devfreq.
1031  */
1032 int devfreq_suspend_device(struct devfreq *devfreq)
1033 {
1034         int ret;
1035
1036         if (!devfreq)
1037                 return -EINVAL;
1038
1039         if (atomic_inc_return(&devfreq->suspend_count) > 1)
1040                 return 0;
1041
1042         if (devfreq->governor) {
1043                 ret = devfreq->governor->event_handler(devfreq,
1044                                         DEVFREQ_GOV_SUSPEND, NULL);
1045                 if (ret)
1046                         return ret;
1047         }
1048
1049         if (devfreq->suspend_freq) {
1050                 mutex_lock(&devfreq->lock);
1051                 ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
1052                 mutex_unlock(&devfreq->lock);
1053                 if (ret)
1054                         return ret;
1055         }
1056
1057         return 0;
1058 }
1059 EXPORT_SYMBOL(devfreq_suspend_device);
1060
1061 /**
1062  * devfreq_resume_device() - Resume devfreq of a device.
1063  * @devfreq: the devfreq instance to be resumed
1064  *
1065  * This function is intended to be called by the pm callbacks
1066  * (e.g., runtime_resume, resume) of the device driver that
1067  * holds the devfreq.
1068  */
1069 int devfreq_resume_device(struct devfreq *devfreq)
1070 {
1071         int ret;
1072
1073         if (!devfreq)
1074                 return -EINVAL;
1075
1076         if (atomic_dec_return(&devfreq->suspend_count) >= 1)
1077                 return 0;
1078
1079         if (devfreq->resume_freq) {
1080                 mutex_lock(&devfreq->lock);
1081                 ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
1082                 mutex_unlock(&devfreq->lock);
1083                 if (ret)
1084                         return ret;
1085         }
1086
1087         if (devfreq->governor) {
1088                 ret = devfreq->governor->event_handler(devfreq,
1089                                         DEVFREQ_GOV_RESUME, NULL);
1090                 if (ret)
1091                         return ret;
1092         }
1093
1094         return 0;
1095 }
1096 EXPORT_SYMBOL(devfreq_resume_device);
1097
1098 /**
1099  * devfreq_suspend() - Suspend devfreq governors and devices
1100  *
1101  * Called during system wide Suspend/Hibernate cycles for suspending governors
1102  * and devices preserving the state for resume. On some platforms the devfreq
1103  * device must have precise state (frequency) after resume in order to provide
1104  * fully operating setup.
1105  */
1106 void devfreq_suspend(void)
1107 {
1108         struct devfreq *devfreq;
1109         int ret;
1110
1111         mutex_lock(&devfreq_list_lock);
1112         list_for_each_entry(devfreq, &devfreq_list, node) {
1113                 ret = devfreq_suspend_device(devfreq);
1114                 if (ret)
1115                         dev_err(&devfreq->dev,
1116                                 "failed to suspend devfreq device\n");
1117         }
1118         mutex_unlock(&devfreq_list_lock);
1119 }
1120
1121 /**
1122  * devfreq_resume() - Resume devfreq governors and devices
1123  *
1124  * Called during system wide Suspend/Hibernate cycle for resuming governors and
1125  * devices that are suspended with devfreq_suspend().
1126  */
1127 void devfreq_resume(void)
1128 {
1129         struct devfreq *devfreq;
1130         int ret;
1131
1132         mutex_lock(&devfreq_list_lock);
1133         list_for_each_entry(devfreq, &devfreq_list, node) {
1134                 ret = devfreq_resume_device(devfreq);
1135                 if (ret)
1136                         dev_warn(&devfreq->dev,
1137                                  "failed to resume devfreq device\n");
1138         }
1139         mutex_unlock(&devfreq_list_lock);
1140 }
1141
1142 /**
1143  * devfreq_add_governor() - Add devfreq governor
1144  * @governor:   the devfreq governor to be added
1145  */
1146 int devfreq_add_governor(struct devfreq_governor *governor)
1147 {
1148         struct devfreq_governor *g;
1149         struct devfreq *devfreq;
1150         int err = 0;
1151
1152         if (!governor) {
1153                 pr_err("%s: Invalid parameters.\n", __func__);
1154                 return -EINVAL;
1155         }
1156
1157         mutex_lock(&devfreq_list_lock);
1158         g = find_devfreq_governor(governor->name);
1159         if (!IS_ERR(g)) {
1160                 pr_err("%s: governor %s already registered\n", __func__,
1161                        g->name);
1162                 err = -EINVAL;
1163                 goto err_out;
1164         }
1165
1166         list_add(&governor->node, &devfreq_governor_list);
1167
1168         list_for_each_entry(devfreq, &devfreq_list, node) {
1169                 int ret = 0;
1170                 struct device *dev = devfreq->dev.parent;
1171
1172                 if (!strncmp(devfreq->governor_name, governor->name,
1173                              DEVFREQ_NAME_LEN)) {
1174                         /* The following should never occur */
1175                         if (devfreq->governor) {
1176                                 dev_warn(dev,
1177                                          "%s: Governor %s already present\n",
1178                                          __func__, devfreq->governor->name);
1179                                 ret = devfreq->governor->event_handler(devfreq,
1180                                                         DEVFREQ_GOV_STOP, NULL);
1181                                 if (ret) {
1182                                         dev_warn(dev,
1183                                                  "%s: Governor %s stop = %d\n",
1184                                                  __func__,
1185                                                  devfreq->governor->name, ret);
1186                                 }
1187                                 /* Fall through */
1188                         }
1189                         devfreq->governor = governor;
1190                         ret = devfreq->governor->event_handler(devfreq,
1191                                                 DEVFREQ_GOV_START, NULL);
1192                         if (ret) {
1193                                 dev_warn(dev, "%s: Governor %s start=%d\n",
1194                                          __func__, devfreq->governor->name,
1195                                          ret);
1196                         }
1197                 }
1198         }
1199
1200 err_out:
1201         mutex_unlock(&devfreq_list_lock);
1202
1203         return err;
1204 }
1205 EXPORT_SYMBOL(devfreq_add_governor);
1206
1207 /**
1208  * devfreq_remove_governor() - Remove devfreq feature from a device.
1209  * @governor:   the devfreq governor to be removed
1210  */
1211 int devfreq_remove_governor(struct devfreq_governor *governor)
1212 {
1213         struct devfreq_governor *g;
1214         struct devfreq *devfreq;
1215         int err = 0;
1216
1217         if (!governor) {
1218                 pr_err("%s: Invalid parameters.\n", __func__);
1219                 return -EINVAL;
1220         }
1221
1222         mutex_lock(&devfreq_list_lock);
1223         g = find_devfreq_governor(governor->name);
1224         if (IS_ERR(g)) {
1225                 pr_err("%s: governor %s not registered\n", __func__,
1226                        governor->name);
1227                 err = PTR_ERR(g);
1228                 goto err_out;
1229         }
1230         list_for_each_entry(devfreq, &devfreq_list, node) {
1231                 int ret;
1232                 struct device *dev = devfreq->dev.parent;
1233
1234                 if (!strncmp(devfreq->governor_name, governor->name,
1235                              DEVFREQ_NAME_LEN)) {
1236                         /* we should have a devfreq governor! */
1237                         if (!devfreq->governor) {
1238                                 dev_warn(dev, "%s: Governor %s NOT present\n",
1239                                          __func__, governor->name);
1240                                 continue;
1241                                 /* Fall through */
1242                         }
1243                         ret = devfreq->governor->event_handler(devfreq,
1244                                                 DEVFREQ_GOV_STOP, NULL);
1245                         if (ret) {
1246                                 dev_warn(dev, "%s: Governor %s stop=%d\n",
1247                                          __func__, devfreq->governor->name,
1248                                          ret);
1249                         }
1250                         devfreq->governor = NULL;
1251                 }
1252         }
1253
1254         list_del(&governor->node);
1255 err_out:
1256         mutex_unlock(&devfreq_list_lock);
1257
1258         return err;
1259 }
1260 EXPORT_SYMBOL(devfreq_remove_governor);
1261
1262 static ssize_t governor_show(struct device *dev,
1263                              struct device_attribute *attr, char *buf)
1264 {
1265         if (!to_devfreq(dev)->governor)
1266                 return -EINVAL;
1267
1268         return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
1269 }
1270
1271 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1272                               const char *buf, size_t count)
1273 {
1274         struct devfreq *df = to_devfreq(dev);
1275         int ret;
1276         char str_governor[DEVFREQ_NAME_LEN + 1];
1277         const struct devfreq_governor *governor, *prev_governor;
1278
1279         ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
1280         if (ret != 1)
1281                 return -EINVAL;
1282
1283         mutex_lock(&devfreq_list_lock);
1284         governor = try_then_request_governor(str_governor);
1285         if (IS_ERR(governor)) {
1286                 ret = PTR_ERR(governor);
1287                 goto out;
1288         }
1289         if (df->governor == governor) {
1290                 ret = 0;
1291                 goto out;
1292         } else if ((df->governor && df->governor->immutable) ||
1293                                         governor->immutable) {
1294                 ret = -EINVAL;
1295                 goto out;
1296         }
1297
1298         if (df->governor) {
1299                 ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1300                 if (ret) {
1301                         dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1302                                  __func__, df->governor->name, ret);
1303                         goto out;
1304                 }
1305         }
1306         prev_governor = df->governor;
1307         df->governor = governor;
1308         strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
1309         ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1310         if (ret) {
1311                 dev_warn(dev, "%s: Governor %s not started(%d)\n",
1312                          __func__, df->governor->name, ret);
1313                 df->governor = prev_governor;
1314                 strncpy(df->governor_name, prev_governor->name,
1315                         DEVFREQ_NAME_LEN);
1316                 ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1317                 if (ret) {
1318                         dev_err(dev,
1319                                 "%s: reverting to Governor %s failed (%d)\n",
1320                                 __func__, df->governor_name, ret);
1321                         df->governor = NULL;
1322                 }
1323         }
1324 out:
1325         mutex_unlock(&devfreq_list_lock);
1326
1327         if (!ret)
1328                 ret = count;
1329         return ret;
1330 }
1331 static DEVICE_ATTR_RW(governor);
1332
1333 static ssize_t available_governors_show(struct device *d,
1334                                         struct device_attribute *attr,
1335                                         char *buf)
1336 {
1337         struct devfreq *df = to_devfreq(d);
1338         ssize_t count = 0;
1339
1340         mutex_lock(&devfreq_list_lock);
1341
1342         /*
1343          * The devfreq with immutable governor (e.g., passive) shows
1344          * only own governor.
1345          */
1346         if (df->governor && df->governor->immutable) {
1347                 count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
1348                                   "%s ", df->governor_name);
1349         /*
1350          * The devfreq device shows the registered governor except for
1351          * immutable governors such as passive governor .
1352          */
1353         } else {
1354                 struct devfreq_governor *governor;
1355
1356                 list_for_each_entry(governor, &devfreq_governor_list, node) {
1357                         if (governor->immutable)
1358                                 continue;
1359                         count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1360                                            "%s ", governor->name);
1361                 }
1362         }
1363
1364         mutex_unlock(&devfreq_list_lock);
1365
1366         /* Truncate the trailing space */
1367         if (count)
1368                 count--;
1369
1370         count += sprintf(&buf[count], "\n");
1371
1372         return count;
1373 }
1374 static DEVICE_ATTR_RO(available_governors);
1375
1376 static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
1377                              char *buf)
1378 {
1379         unsigned long freq;
1380         struct devfreq *devfreq = to_devfreq(dev);
1381
1382         if (devfreq->profile->get_cur_freq &&
1383                 !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1384                 return sprintf(buf, "%lu\n", freq);
1385
1386         return sprintf(buf, "%lu\n", devfreq->previous_freq);
1387 }
1388 static DEVICE_ATTR_RO(cur_freq);
1389
1390 static ssize_t target_freq_show(struct device *dev,
1391                                 struct device_attribute *attr, char *buf)
1392 {
1393         return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
1394 }
1395 static DEVICE_ATTR_RO(target_freq);
1396
1397 static ssize_t polling_interval_show(struct device *dev,
1398                                      struct device_attribute *attr, char *buf)
1399 {
1400         return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
1401 }
1402
1403 static ssize_t polling_interval_store(struct device *dev,
1404                                       struct device_attribute *attr,
1405                                       const char *buf, size_t count)
1406 {
1407         struct devfreq *df = to_devfreq(dev);
1408         unsigned int value;
1409         int ret;
1410
1411         if (!df->governor)
1412                 return -EINVAL;
1413
1414         ret = sscanf(buf, "%u", &value);
1415         if (ret != 1)
1416                 return -EINVAL;
1417
1418         df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
1419         ret = count;
1420
1421         return ret;
1422 }
1423 static DEVICE_ATTR_RW(polling_interval);
1424
1425 static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
1426                               const char *buf, size_t count)
1427 {
1428         struct devfreq *df = to_devfreq(dev);
1429         unsigned long value;
1430         int ret;
1431
1432         /*
1433          * Protect against theoretical sysfs writes between
1434          * device_add and dev_pm_qos_add_request
1435          */
1436         if (!dev_pm_qos_request_active(&df->user_min_freq_req))
1437                 return -EAGAIN;
1438
1439         ret = sscanf(buf, "%lu", &value);
1440         if (ret != 1)
1441                 return -EINVAL;
1442
1443         /* Round down to kHz for PM QoS */
1444         ret = dev_pm_qos_update_request(&df->user_min_freq_req,
1445                                         value / HZ_PER_KHZ);
1446         if (ret < 0)
1447                 return ret;
1448
1449         return count;
1450 }
1451
1452 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
1453                              char *buf)
1454 {
1455         struct devfreq *df = to_devfreq(dev);
1456         unsigned long min_freq, max_freq;
1457
1458         mutex_lock(&df->lock);
1459         get_freq_range(df, &min_freq, &max_freq);
1460         mutex_unlock(&df->lock);
1461
1462         return sprintf(buf, "%lu\n", min_freq);
1463 }
1464
1465 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
1466                               const char *buf, size_t count)
1467 {
1468         struct devfreq *df = to_devfreq(dev);
1469         unsigned long value;
1470         int ret;
1471
1472         /*
1473          * Protect against theoretical sysfs writes between
1474          * device_add and dev_pm_qos_add_request
1475          */
1476         if (!dev_pm_qos_request_active(&df->user_max_freq_req))
1477                 return -EINVAL;
1478
1479         ret = sscanf(buf, "%lu", &value);
1480         if (ret != 1)
1481                 return -EINVAL;
1482
1483         /*
1484          * PM QoS frequencies are in kHz so we need to convert. Convert by
1485          * rounding upwards so that the acceptable interval never shrinks.
1486          *
1487          * For example if the user writes "666666666" to sysfs this value will
1488          * be converted to 666667 kHz and back to 666667000 Hz before an OPP
1489          * lookup, this ensures that an OPP of 666666666Hz is still accepted.
1490          *
1491          * A value of zero means "no limit".
1492          */
1493         if (value)
1494                 value = DIV_ROUND_UP(value, HZ_PER_KHZ);
1495         else
1496                 value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
1497
1498         ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
1499         if (ret < 0)
1500                 return ret;
1501
1502         return count;
1503 }
1504 static DEVICE_ATTR_RW(min_freq);
1505
1506 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
1507                              char *buf)
1508 {
1509         struct devfreq *df = to_devfreq(dev);
1510         unsigned long min_freq, max_freq;
1511
1512         mutex_lock(&df->lock);
1513         get_freq_range(df, &min_freq, &max_freq);
1514         mutex_unlock(&df->lock);
1515
1516         return sprintf(buf, "%lu\n", max_freq);
1517 }
1518 static DEVICE_ATTR_RW(max_freq);
1519
1520 static ssize_t available_frequencies_show(struct device *d,
1521                                           struct device_attribute *attr,
1522                                           char *buf)
1523 {
1524         struct devfreq *df = to_devfreq(d);
1525         ssize_t count = 0;
1526         int i;
1527
1528         mutex_lock(&df->lock);
1529
1530         for (i = 0; i < df->profile->max_state; i++)
1531                 count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1532                                 "%lu ", df->profile->freq_table[i]);
1533
1534         mutex_unlock(&df->lock);
1535         /* Truncate the trailing space */
1536         if (count)
1537                 count--;
1538
1539         count += sprintf(&buf[count], "\n");
1540
1541         return count;
1542 }
1543 static DEVICE_ATTR_RO(available_frequencies);
1544
1545 static ssize_t trans_stat_show(struct device *dev,
1546                                struct device_attribute *attr, char *buf)
1547 {
1548         struct devfreq *devfreq = to_devfreq(dev);
1549         ssize_t len;
1550         int i, j;
1551         unsigned int max_state = devfreq->profile->max_state;
1552
1553         if (max_state == 0)
1554                 return sprintf(buf, "Not Supported.\n");
1555
1556         mutex_lock(&devfreq->lock);
1557         if (!devfreq->stop_polling &&
1558                         devfreq_update_status(devfreq, devfreq->previous_freq)) {
1559                 mutex_unlock(&devfreq->lock);
1560                 return 0;
1561         }
1562         mutex_unlock(&devfreq->lock);
1563
1564         len = sprintf(buf, "     From  :   To\n");
1565         len += sprintf(buf + len, "           :");
1566         for (i = 0; i < max_state; i++)
1567                 len += sprintf(buf + len, "%10lu",
1568                                 devfreq->profile->freq_table[i]);
1569
1570         len += sprintf(buf + len, "   time(ms)\n");
1571
1572         for (i = 0; i < max_state; i++) {
1573                 if (devfreq->profile->freq_table[i]
1574                                         == devfreq->previous_freq) {
1575                         len += sprintf(buf + len, "*");
1576                 } else {
1577                         len += sprintf(buf + len, " ");
1578                 }
1579                 len += sprintf(buf + len, "%10lu:",
1580                                 devfreq->profile->freq_table[i]);
1581                 for (j = 0; j < max_state; j++)
1582                         len += sprintf(buf + len, "%10u",
1583                                 devfreq->trans_table[(i * max_state) + j]);
1584                 len += sprintf(buf + len, "%10u\n",
1585                         jiffies_to_msecs(devfreq->time_in_state[i]));
1586         }
1587
1588         len += sprintf(buf + len, "Total transition : %u\n",
1589                                         devfreq->total_trans);
1590         return len;
1591 }
1592 static DEVICE_ATTR_RO(trans_stat);
1593
/* Default sysfs attributes created for every devfreq device. */
static struct attribute *devfreq_attrs[] = {
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);
1607
1608 static int __init devfreq_init(void)
1609 {
1610         devfreq_class = class_create(THIS_MODULE, "devfreq");
1611         if (IS_ERR(devfreq_class)) {
1612                 pr_err("%s: couldn't create class\n", __FILE__);
1613                 return PTR_ERR(devfreq_class);
1614         }
1615
1616         devfreq_wq = create_freezable_workqueue("devfreq_wq");
1617         if (!devfreq_wq) {
1618                 class_destroy(devfreq_class);
1619                 pr_err("%s: couldn't create workqueue\n", __FILE__);
1620                 return -ENOMEM;
1621         }
1622         devfreq_class->dev_groups = devfreq_groups;
1623
1624         return 0;
1625 }
1626 subsys_initcall(devfreq_init);
1627
1628 /*
1629  * The following are helper functions for devfreq user device drivers with
1630  * OPP framework.
1631  */
1632
1633 /**
1634  * devfreq_recommended_opp() - Helper function to get proper OPP for the
1635  *                           freq value given to target callback.
1636  * @dev:        The devfreq user device. (parent of devfreq)
1637  * @freq:       The frequency given to target function
1638  * @flags:      Flags handed from devfreq framework.
1639  *
1640  * The callers are required to call dev_pm_opp_put() for the returned OPP after
1641  * use.
1642  */
1643 struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
1644                                            unsigned long *freq,
1645                                            u32 flags)
1646 {
1647         struct dev_pm_opp *opp;
1648
1649         if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
1650                 /* The freq is an upper bound. opp should be lower */
1651                 opp = dev_pm_opp_find_freq_floor(dev, freq);
1652
1653                 /* If not available, use the closest opp */
1654                 if (opp == ERR_PTR(-ERANGE))
1655                         opp = dev_pm_opp_find_freq_ceil(dev, freq);
1656         } else {
1657                 /* The freq is an lower bound. opp should be higher */
1658                 opp = dev_pm_opp_find_freq_ceil(dev, freq);
1659
1660                 /* If not available, use the closest opp */
1661                 if (opp == ERR_PTR(-ERANGE))
1662                         opp = dev_pm_opp_find_freq_floor(dev, freq);
1663         }
1664
1665         return opp;
1666 }
1667 EXPORT_SYMBOL(devfreq_recommended_opp);
1668
1669 /**
1670  * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1671  *                                   for any changes in the OPP availability
1672  *                                   changes
1673  * @dev:        The devfreq user device. (parent of devfreq)
1674  * @devfreq:    The devfreq object.
1675  */
1676 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
1677 {
1678         return dev_pm_opp_register_notifier(dev, &devfreq->nb);
1679 }
1680 EXPORT_SYMBOL(devfreq_register_opp_notifier);
1681
1682 /**
1683  * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1684  *                                     notified for any changes in the OPP
1685  *                                     availability changes anymore.
1686  * @dev:        The devfreq user device. (parent of devfreq)
1687  * @devfreq:    The devfreq object.
1688  *
1689  * At exit() callback of devfreq_dev_profile, this must be included if
1690  * devfreq_recommended_opp is used.
1691  */
1692 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
1693 {
1694         return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
1695 }
1696 EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
1697
/* devres release callback: drop the managed OPP notifier registration. */
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	struct devfreq *devfreq = *(struct devfreq **)res;

	devfreq_unregister_opp_notifier(dev, devfreq);
}
1702
1703 /**
1704  * devm_devfreq_register_opp_notifier() - Resource-managed
1705  *                                        devfreq_register_opp_notifier()
1706  * @dev:        The devfreq user device. (parent of devfreq)
1707  * @devfreq:    The devfreq object.
1708  */
1709 int devm_devfreq_register_opp_notifier(struct device *dev,
1710                                        struct devfreq *devfreq)
1711 {
1712         struct devfreq **ptr;
1713         int ret;
1714
1715         ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
1716         if (!ptr)
1717                 return -ENOMEM;
1718
1719         ret = devfreq_register_opp_notifier(dev, devfreq);
1720         if (ret) {
1721                 devres_free(ptr);
1722                 return ret;
1723         }
1724
1725         *ptr = devfreq;
1726         devres_add(dev, ptr);
1727
1728         return 0;
1729 }
1730 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
1731
/**
 * devm_devfreq_unregister_opp_notifier() - Resource-managed
 *                                          devfreq_unregister_opp_notifier()
 * @dev:        The devfreq user device. (parent of devfreq)
 * @devfreq:    The devfreq object.
 *
 * Releases the matching devres entry, which unregisters the OPP notifier.
 * WARNs if no such registration is found on @dev.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
                                         struct devfreq *devfreq)
{
        WARN_ON(devres_release(dev, devm_devfreq_opp_release,
                               devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
1745
1746 /**
1747  * devfreq_register_notifier() - Register a driver with devfreq
1748  * @devfreq:    The devfreq object.
1749  * @nb:         The notifier block to register.
1750  * @list:       DEVFREQ_TRANSITION_NOTIFIER.
1751  */
1752 int devfreq_register_notifier(struct devfreq *devfreq,
1753                               struct notifier_block *nb,
1754                               unsigned int list)
1755 {
1756         int ret = 0;
1757
1758         if (!devfreq)
1759                 return -EINVAL;
1760
1761         switch (list) {
1762         case DEVFREQ_TRANSITION_NOTIFIER:
1763                 ret = srcu_notifier_chain_register(
1764                                 &devfreq->transition_notifier_list, nb);
1765                 break;
1766         default:
1767                 ret = -EINVAL;
1768         }
1769
1770         return ret;
1771 }
1772 EXPORT_SYMBOL(devfreq_register_notifier);
1773
/**
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq:    The devfreq object.
 * @nb:         The notifier block to be unregistered.
 * @list:       DEVFREQ_TRANSITION_NOTIFIER.
 *
 * Return: 0 on success, -EINVAL if @devfreq is NULL or @list is not a
 * recognized notifier list, or a negative errno from the notifier chain.
 */
int devfreq_unregister_notifier(struct devfreq *devfreq,
                                struct notifier_block *nb,
                                unsigned int list)
{
        int ret = 0;

        if (!devfreq)
                return -EINVAL;

        switch (list) {
        case DEVFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_unregister(
                                &devfreq->transition_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(devfreq_unregister_notifier);
1801
/* devres payload recording one devfreq notifier registration */
struct devfreq_notifier_devres {
        struct devfreq *devfreq;        /* devfreq the notifier was registered on */
        struct notifier_block *nb;      /* the registered notifier block */
        unsigned int list;              /* e.g. DEVFREQ_TRANSITION_NOTIFIER */
};
1807
/*
 * devres release callback: undo the registration recorded in the
 * devfreq_notifier_devres payload.
 */
static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
        struct devfreq_notifier_devres *this = res;

        devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}
1814
/**
 * devm_devfreq_register_notifier() - Resource-managed
 *                                    devfreq_register_notifier()
 * @dev:        The devfreq user device. (parent of devfreq)
 * @devfreq:    The devfreq object.
 * @nb:         The notifier block to register.
 * @list:       DEVFREQ_TRANSITION_NOTIFIER.
 *
 * The notifier is automatically unregistered when @dev is unbound.
 *
 * Return: 0 on success, -ENOMEM on devres allocation failure, or a
 * negative errno from devfreq_register_notifier().
 */
int devm_devfreq_register_notifier(struct device *dev,
                                struct devfreq *devfreq,
                                struct notifier_block *nb,
                                unsigned int list)
{
        struct devfreq_notifier_devres *ptr;
        int ret;

        ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
                                GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = devfreq_register_notifier(devfreq, nb, list);
        if (ret) {
                devres_free(ptr);
                return ret;
        }

        ptr->devfreq = devfreq;
        ptr->nb = nb;
        ptr->list = list;
        devres_add(dev, ptr);

        return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_notifier);
1850
/**
 * devm_devfreq_unregister_notifier() - Resource-managed
 *                                      devfreq_unregister_notifier()
 * @dev:        The devfreq user device. (parent of devfreq)
 * @devfreq:    The devfreq object.
 * @nb:         The notifier block to be unregistered.
 * @list:       DEVFREQ_TRANSITION_NOTIFIER.
 *
 * Releases the matching devres entry, which unregisters the notifier.
 * WARNs if no such registration is found on @dev.
 */
void devm_devfreq_unregister_notifier(struct device *dev,
                                      struct devfreq *devfreq,
                                      struct notifier_block *nb,
                                      unsigned int list)
{
        WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
                               devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);