/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */

/*
 * When queued spinlock statistical counters are enabled, the following
 * debugfs files will be created for reporting the counter values:
 *
 * <debugfs>/qlockstat/
 *   pv_hash_hops       - average # of hops per hashing operation
 *   pv_kick_unlock     - # of vCPU kicks issued at unlock time
 *   pv_kick_wake       - # of vCPU kicks used for computing pv_latency_wake
 *   pv_latency_kick    - average latency (ns) of vCPU kick operation
 *   pv_latency_wake    - average latency (ns) from vCPU kick to wakeup
 *   pv_lock_stealing   - # of lock stealing operations
 *   pv_spurious_wakeup - # of spurious wakeups
 *   pv_wait_again      - # of vCPU waits that happened after a vCPU kick
 *   pv_wait_head       - # of vCPU waits at the queue head
 *   pv_wait_node       - # of vCPU waits at a non-head queue node
 *
 * Writing to the "reset_counters" file will reset all the above counter
 * values.
 *
 * These statistical counters are implemented as per-cpu variables which are
 * summed and computed whenever the corresponding debugfs files are read. This
 * minimizes added overhead, making the counters usable even in a production
 * environment.
 *
 * There may be a slight difference between pv_kick_wake and pv_kick_unlock.
 */
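
/*
 * Example session (hypothetical counter values; assumes debugfs is mounted
 * at /sys/kernel/debug and CONFIG_QUEUED_LOCK_STAT=y):
 *
 *   # cat /sys/kernel/debug/qlockstat/pv_kick_unlock
 *   34578
 *   # cat /sys/kernel/debug/qlockstat/pv_latency_kick
 *   5230
 *   # echo 1 > /sys/kernel/debug/qlockstat/reset_counters
 *   # cat /sys/kernel/debug/qlockstat/pv_kick_unlock
 *   0
 */
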
enum qlock_stats {
        qstat_pv_hash_hops,
        qstat_pv_kick_unlock,
        qstat_pv_kick_wake,
        qstat_pv_latency_kick,
        qstat_pv_latency_wake,
        qstat_pv_lock_stealing,
        qstat_pv_spurious_wakeup,
        qstat_pv_wait_again,
        qstat_pv_wait_head,
        qstat_pv_wait_node,
        qstat_num,      /* Total number of statistical counters */
        qstat_reset_cnts = qstat_num,
};

#ifdef CONFIG_QUEUED_LOCK_STAT
/*
 * Collect pvqspinlock statistics
 */
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/fs.h>

static const char * const qstat_names[qstat_num + 1] = {
        [qstat_pv_hash_hops]       = "pv_hash_hops",
        [qstat_pv_kick_unlock]     = "pv_kick_unlock",
        [qstat_pv_kick_wake]       = "pv_kick_wake",
        [qstat_pv_spurious_wakeup] = "pv_spurious_wakeup",
        [qstat_pv_latency_kick]    = "pv_latency_kick",
        [qstat_pv_latency_wake]    = "pv_latency_wake",
        [qstat_pv_lock_stealing]   = "pv_lock_stealing",
        [qstat_pv_wait_again]      = "pv_wait_again",
        [qstat_pv_wait_head]       = "pv_wait_head",
        [qstat_pv_wait_node]       = "pv_wait_node",
        [qstat_reset_cnts]         = "reset_counters",
};

/*
 * Per-cpu counters
 */
static DEFINE_PER_CPU(unsigned long, qstats[qstat_num]);
static DEFINE_PER_CPU(u64, pv_kick_time);

/*
 * Function to read and return the qlock statistical counter values
 *
 * The following counters are handled specially:
 * 1. qstat_pv_latency_kick
 *    Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
 * 2. qstat_pv_latency_wake
 *    Average wake latency (ns) = pv_latency_wake/pv_kick_wake
 * 3. qstat_pv_hash_hops
 *    Average hops/hash = pv_hash_hops/pv_kick_unlock
 */
static ssize_t qstat_read(struct file *file, char __user *user_buf,
                          size_t count, loff_t *ppos)
{
        char buf[64];
        int cpu, counter, len;
        u64 stat = 0, kicks = 0;

        /*
         * Get the counter ID stored in file->f_inode->i_private
         */
        if (!file->f_inode) {
                WARN_ON_ONCE(1);
                return -EBADF;
        }
        counter = (long)(file->f_inode->i_private);

        if (counter >= qstat_num)
                return -EBADF;

        for_each_possible_cpu(cpu) {
                stat += per_cpu(qstats[counter], cpu);
                /*
                 * Need to sum an additional counter for some of them
                 */
                switch (counter) {

                case qstat_pv_latency_kick:
                case qstat_pv_hash_hops:
                        kicks += per_cpu(qstats[qstat_pv_kick_unlock], cpu);
                        break;

                case qstat_pv_latency_wake:
                        kicks += per_cpu(qstats[qstat_pv_kick_wake], cpu);
                        break;
                }
        }

        if (counter == qstat_pv_hash_hops) {
                u64 frac = 0;

                if (kicks) {
                        frac = 100ULL * do_div(stat, kicks);
                        frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
                }

                /*
                 * Return a X.XX decimal number
                 */
                len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n", stat, frac);
        } else {
                /*
                 * Round to the nearest ns
                 */
                if ((counter == qstat_pv_latency_kick) ||
                    (counter == qstat_pv_latency_wake)) {
                        if (kicks)
                                stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
                        else
                                stat = 0;
                }
                len = snprintf(buf, sizeof(buf) - 1, "%llu\n", stat);
        }

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

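/*
 * Worked example of the averaging above (illustrative numbers): if the
 * summed pv_latency_kick across all CPUs is 1000000 ns and there were
 * 250 pv_kick_unlock events, reading pv_latency_kick reports
 * DIV_ROUND_CLOSEST_ULL(1000000, 250) = 4000 ns. For pv_hash_hops, a
 * sum of 300 hops over the same 250 kicks prints "1.20": do_div()
 * leaves the quotient 1 in stat and returns the remainder 50, so
 * frac = DIV_ROUND_CLOSEST_ULL(100 * 50, 250) = 20.
 */
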
/*
 * Function to handle write request
 *
 * When counter = reset_cnts, reset all the counter values.
 * Since the counter updates aren't atomic, the resetting is done twice
 * to make sure that the counters are very likely to be all cleared.
 */
static ssize_t qstat_write(struct file *file, const char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        int cpu;

        /*
         * Get the counter ID stored in file->f_inode->i_private
         */
        if (!file->f_inode) {
                WARN_ON_ONCE(1);
                return -EBADF;
        }
        if ((long)(file->f_inode->i_private) != qstat_reset_cnts)
                return count;

        for_each_possible_cpu(cpu) {
                int i;
                unsigned long *ptr = per_cpu_ptr(qstats, cpu);

                for (i = 0; i < qstat_num; i++)
                        WRITE_ONCE(ptr[i], 0);
                for (i = 0; i < qstat_num; i++)
                        WRITE_ONCE(ptr[i], 0);
        }
        return count;
}
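
/*
 * Why two clearing passes (illustration): this_cpu_inc() is not an atomic
 * read-modify-write on all architectures, so an increment racing with the
 * first pass can store back a stale pre-reset value after that pass has
 * moved on. The second pass clears such stragglers; since counter updates
 * are infrequent, the chance of a value surviving both passes is tiny.
 */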

/*
 * Debugfs data structures
 */
static const struct file_operations fops_qstat = {
        .read = qstat_read,
        .write = qstat_write,
        .llseek = default_llseek,
};

/*
 * Initialize debugfs for the qspinlock statistical counters
 */
static int __init init_qspinlock_stat(void)
{
        struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL);
        int i;

        if (!d_qstat) {
                pr_warn("Could not create 'qlockstat' debugfs directory\n");
                return 0;
        }

        /*
         * Create the debugfs files
         *
         * As reading from and writing to the stat files can be slow, only
         * root is allowed to do the read/write to limit impact to system
         * performance.
         */
        for (i = 0; i < qstat_num; i++)
                debugfs_create_file(qstat_names[i], 0400, d_qstat,
                                    (void *)(long)i, &fops_qstat);

        debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
                            (void *)(long)qstat_reset_cnts, &fops_qstat);
        return 0;
}
fs_initcall(init_qspinlock_stat);

/*
 * Increment the PV qspinlock statistical counters
 */
static inline void qstat_inc(enum qlock_stats stat, bool cond)
{
        if (cond)
                this_cpu_inc(qstats[stat]);
}

/*
 * PV hash hop count
 */
static inline void qstat_hop(int hopcnt)
{
        this_cpu_add(qstats[qstat_pv_hash_hops], hopcnt);
}

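/*
 * Typical call sites (shown for illustration; the actual callers live in
 * the paravirt qspinlock slowpath, qspinlock_paravirt.h):
 *
 *   qstat_inc(qstat_pv_wait_node, true);  - vCPU waits at a queue node
 *   qstat_hop(hopcnt);                    - after a pv_hash() insertion
 */
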
/*
 * Replacement function for pv_kick()
 */
static inline void __pv_kick(int cpu)
{
        u64 start = sched_clock();

        per_cpu(pv_kick_time, cpu) = start;
        pv_kick(cpu);
        this_cpu_add(qstats[qstat_pv_latency_kick], sched_clock() - start);
}

/*
 * Replacement function for pv_wait()
 */
static inline void __pv_wait(u8 *ptr, u8 val)
{
        u64 *pkick_time = this_cpu_ptr(&pv_kick_time);

        *pkick_time = 0;
        pv_wait(ptr, val);
        if (*pkick_time) {
                this_cpu_add(qstats[qstat_pv_latency_wake],
                             sched_clock() - *pkick_time);
                qstat_inc(qstat_pv_kick_wake, true);
        }
}

#define pv_kick(c)      __pv_kick(c)
#define pv_wait(p, v)   __pv_wait(p, v)

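/*
 * How the two wrappers cooperate: __pv_kick() stamps sched_clock() into
 * the target vCPU's pv_kick_time before kicking it. If __pv_wait() on
 * that vCPU then returns with a non-zero timestamp, the wakeup was the
 * result of a kick, so the kick-to-wakeup interval is accumulated into
 * pv_latency_wake and pv_kick_wake is incremented; a zero timestamp
 * means the vCPU woke up for some other reason.
 */
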
/*
 * PV unfair trylock count tracking function
 */
static inline int qstat_spin_steal_lock(struct qspinlock *lock)
{
        int ret = pv_queued_spin_steal_lock(lock);

        qstat_inc(qstat_pv_lock_stealing, ret);
        return ret;
}
#undef  queued_spin_trylock
#define queued_spin_trylock(l)  qstat_spin_steal_lock(l)

#else /* CONFIG_QUEUED_LOCK_STAT */

static inline void qstat_inc(enum qlock_stats stat, bool cond)  { }
static inline void qstat_hop(int hopcnt)                        { }

#endif /* CONFIG_QUEUED_LOCK_STAT */