 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
/*
 * When queued spinlock statistical counters are enabled, the following
 * debugfs files will be created for reporting the counter values:
 *
 * <debugfs>/qlockstat/
 *   pv_hash_hops       - average # of hops per hashing operation
 *   pv_kick_unlock     - # of vCPU kicks issued at unlock time
 *   pv_kick_wake       - # of vCPU kicks used for computing pv_latency_wake
 *   pv_latency_kick    - average latency (ns) of vCPU kick operation
 *   pv_latency_wake    - average latency (ns) from vCPU kick to wakeup
 *   pv_lock_stealing   - # of lock stealing operations
 *   pv_spurious_wakeup - # of spurious wakeups in non-head vCPUs
 *   pv_wait_again      - # of wait's after a queue head vCPU kick
 *   pv_wait_early      - # of early vCPU wait's
 *   pv_wait_head       - # of vCPU wait's at the queue head
 *   pv_wait_node       - # of vCPU wait's at a non-head queue node
 *   lock_pending       - # of locking operations via pending code
 *   lock_slowpath      - # of locking operations via MCS lock queue
 *   lock_use_node2     - # of locking operations that use 2nd per-CPU node
 *   lock_use_node3     - # of locking operations that use 3rd per-CPU node
 *   lock_use_node4     - # of locking operations that use 4th per-CPU node
 *   lock_no_node       - # of locking operations without using per-CPU node
 *
 * Subtracting lock_use_node[234] from lock_slowpath will give you
 * lock_use_node1.
 *
 * Writing to the special ".reset_counts" file will reset all the above
 * counter values.
 *
 * These statistical counters are implemented as per-cpu variables which are
 * summed and computed whenever the corresponding debugfs files are read. This
 * minimizes added overhead, making the counters usable even in a production
 * environment.
 *
 * There may be a slight difference between pv_kick_wake and pv_kick_unlock.
 */
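
/*
 * Example usage, assuming debugfs is mounted at the conventional
 * /sys/kernel/debug location (the actual mount point may differ):
 *
 *   # cat /sys/kernel/debug/qlockstat/lock_pending
 *   # echo 1 > /sys/kernel/debug/qlockstat/.reset_counts
 *
 * Any write to .reset_counts clears all of the counters listed above.
 */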
#include "lock_events.h"

#ifdef CONFIG_QUEUED_LOCK_STAT
/*
 * Collect pvqspinlock statistics
 */
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>

#define EVENT_COUNT(ev)	lockevents[LOCKEVENT_ ## ev]

#undef  LOCK_EVENT
#define LOCK_EVENT(name)	[LOCKEVENT_ ## name] = #name,

static const char * const lockevent_names[lockevent_num + 1] = {

#include "lock_events_list.h"

	[LOCKEVENT_reset_cnts] = ".reset_counts",
};
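
/*
 * The table above is built with the "X macro" pattern: lock_events_list.h
 * invokes LOCK_EVENT(name) once per event, and the LOCK_EVENT redefinition
 * above turns each invocation into a designated initializer. An illustrative
 * expansion for one of the events:
 *
 *   LOCK_EVENT(lock_pending)  =>  [LOCKEVENT_lock_pending] = "lock_pending",
 */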

/*
 * Per-cpu counters
 */
DEFINE_PER_CPU(unsigned long, lockevents[lockevent_num]);
static DEFINE_PER_CPU(u64, pv_kick_time);

/*
 * Function to read and return the qlock statistical counter values
 *
 * The following counters are handled specially:
 * 1. pv_latency_kick
 *    Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
 * 2. pv_latency_wake
 *    Average wake latency (ns) = pv_latency_wake/pv_kick_wake
 * 3. pv_hash_hops
 *    Average hops/hash = pv_hash_hops/pv_kick_unlock
 */
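/*
 * Worked example with made-up numbers: if pv_latency_kick sums to 1,500,000
 * (ns) across all CPUs while pv_kick_unlock sums to 1,000, reading the
 * pv_latency_kick file reports 1500, i.e. the average kick latency in ns
 * rounded to the nearest integer.
 */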
static ssize_t lockevent_read(struct file *file, char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        char buf[64];
        int cpu, id, len;
        u64 sum = 0, kicks = 0;

        /* Get the counter ID stored in file->f_inode->i_private */
        id = (long)file_inode(file)->i_private;

        if (id >= lockevent_num)
                return -EBADF;

        for_each_possible_cpu(cpu) {
                sum += per_cpu(lockevents[id], cpu);
                /* Need to sum additional counters for some of them */
                switch (id) {
                case LOCKEVENT_pv_latency_kick:
                case LOCKEVENT_pv_hash_hops:
                        kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
                        break;
                case LOCKEVENT_pv_latency_wake:
                        kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
                        break;
                }
        }

        if (id == LOCKEVENT_pv_hash_hops) {
                u64 frac = 0;

                if (kicks) {
                        frac = 100ULL * do_div(sum, kicks);
                        frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
                }

                /* Return an X.XX decimal number */
                len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n",
                               sum, frac);
        } else {
                /* Round to the nearest ns */
                if ((id == LOCKEVENT_pv_latency_kick) ||
                    (id == LOCKEVENT_pv_latency_wake)) {
                        if (kicks)
                                sum = DIV_ROUND_CLOSEST_ULL(sum, kicks);
                }
                len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
        }

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/*
 * Function to handle write request
 * When id = .reset_cnts, reset all the counter values.
 */
static ssize_t lockevent_write(struct file *file, const char __user *user_buf,
                               size_t count, loff_t *ppos)
{
        int cpu;

        /* Get the counter ID stored in file->f_inode->i_private */
        if ((long)file_inode(file)->i_private != LOCKEVENT_reset_cnts)
                return count;

        for_each_possible_cpu(cpu) {
                int i;
                unsigned long *ptr = per_cpu_ptr(lockevents, cpu);

                for (i = 0; i < lockevent_num; i++)
                        WRITE_ONCE(ptr[i], 0);
        }
        return count;
}

/*
 * Debugfs data structures
 */
static const struct file_operations fops_lockevent = {
        .read = lockevent_read,
        .write = lockevent_write,
        .llseek = default_llseek,
};

/* Initialize debugfs for the qspinlock statistical counters */
static int __init init_qspinlock_stat(void)
{
        struct dentry *d_counts = debugfs_create_dir("qlockstat", NULL);
        int i;

        if (!d_counts)
                goto out;

        /*
         * Create the debugfs files. As reading from and writing to the stat
         * files can be slow, only root is allowed to do the read/write to
         * limit the impact to system performance.
         */
        for (i = 0; i < lockevent_num; i++)
                if (!debugfs_create_file(lockevent_names[i], 0400, d_counts,
                                         (void *)(long)i, &fops_lockevent))
                        goto fail_undo;

        if (!debugfs_create_file(lockevent_names[LOCKEVENT_reset_cnts], 0200,
                                 d_counts, (void *)(long)LOCKEVENT_reset_cnts,
                                 &fops_lockevent))
                goto fail_undo;
        return 0;
fail_undo:
        debugfs_remove_recursive(d_counts);
out:
        pr_warn("Could not create 'qlockstat' debugfs entries\n");
        return -ENOMEM;
}
fs_initcall(init_qspinlock_stat);

/* PV hash hop count */
static inline void lockevent_pv_hop(int hopcnt)
{
        this_cpu_add(EVENT_COUNT(pv_hash_hops), hopcnt);
}

/* Replacement function for pv_kick() */
static inline void __pv_kick(int cpu)
{
        u64 start = sched_clock();

        per_cpu(pv_kick_time, cpu) = start;
        pv_kick(cpu);
        this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start);
}

/* Replacement function for pv_wait() */
static inline void __pv_wait(u8 *ptr, u8 val)
{
        u64 *pkick_time = this_cpu_ptr(&pv_kick_time);

        *pkick_time = 0;
        pv_wait(ptr, val);
        if (*pkick_time) {
                this_cpu_add(EVENT_COUNT(pv_latency_wake),
                             sched_clock() - *pkick_time);
                lockevent_inc(pv_kick_wake);
        }
}
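
/*
 * Note on the wake latency measurement: __pv_kick() records the kick time in
 * the target vCPU's pv_kick_time slot before issuing the kick; when the
 * kicked vCPU returns from __pv_wait() and finds a non-zero timestamp, the
 * elapsed time is added to pv_latency_wake and pv_kick_wake is incremented.
 */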

#define pv_kick(c)      __pv_kick(c)
#define pv_wait(p, v)   __pv_wait(p, v)

#else /* CONFIG_QUEUED_LOCK_STAT */

static inline void lockevent_pv_hop(int hopcnt) { }

#endif /* CONFIG_QUEUED_LOCK_STAT */