// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
13 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
16 struct etmv4_config *config = &drvdata->config;
18 idx = config->addr_idx;
21 * TRCACATRn.TYPE bit[1:0]: type of comparison
22 * the trace unit performs
24 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
29 * We are performing instruction address comparison. Set the
30 * relevant bit of ViewInst Include/Exclude Control register
31 * for corresponding address comparator pair.
33 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
34 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
37 if (exclude == true) {
39 * Set exclude bit and unset the include bit
40 * corresponding to comparator pair
42 config->viiectlr |= BIT(idx / 2 + 16);
43 config->viiectlr &= ~BIT(idx / 2);
46 * Set include bit and unset exclude bit
47 * corresponding to comparator pair
49 config->viiectlr |= BIT(idx / 2);
50 config->viiectlr &= ~BIT(idx / 2 + 16);
56 static ssize_t nr_pe_cmp_show(struct device *dev,
57 struct device_attribute *attr,
61 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
63 val = drvdata->nr_pe_cmp;
64 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
66 static DEVICE_ATTR_RO(nr_pe_cmp);
68 static ssize_t nr_addr_cmp_show(struct device *dev,
69 struct device_attribute *attr,
73 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
75 val = drvdata->nr_addr_cmp;
76 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
78 static DEVICE_ATTR_RO(nr_addr_cmp);
80 static ssize_t nr_cntr_show(struct device *dev,
81 struct device_attribute *attr,
85 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
87 val = drvdata->nr_cntr;
88 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
90 static DEVICE_ATTR_RO(nr_cntr);
92 static ssize_t nr_ext_inp_show(struct device *dev,
93 struct device_attribute *attr,
97 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
99 val = drvdata->nr_ext_inp;
100 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
102 static DEVICE_ATTR_RO(nr_ext_inp);
104 static ssize_t numcidc_show(struct device *dev,
105 struct device_attribute *attr,
109 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
111 val = drvdata->numcidc;
112 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
114 static DEVICE_ATTR_RO(numcidc);
116 static ssize_t numvmidc_show(struct device *dev,
117 struct device_attribute *attr,
121 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
123 val = drvdata->numvmidc;
124 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
126 static DEVICE_ATTR_RO(numvmidc);
128 static ssize_t nrseqstate_show(struct device *dev,
129 struct device_attribute *attr,
133 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
135 val = drvdata->nrseqstate;
136 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
138 static DEVICE_ATTR_RO(nrseqstate);
140 static ssize_t nr_resource_show(struct device *dev,
141 struct device_attribute *attr,
145 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
147 val = drvdata->nr_resource;
148 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
150 static DEVICE_ATTR_RO(nr_resource);
152 static ssize_t nr_ss_cmp_show(struct device *dev,
153 struct device_attribute *attr,
157 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
159 val = drvdata->nr_ss_cmp;
160 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
162 static DEVICE_ATTR_RO(nr_ss_cmp);
164 static ssize_t reset_store(struct device *dev,
165 struct device_attribute *attr,
166 const char *buf, size_t size)
170 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
171 struct etmv4_config *config = &drvdata->config;
173 if (kstrtoul(buf, 16, &val))
176 spin_lock(&drvdata->spinlock);
180 /* Disable data tracing: do not trace load and store data transfers */
181 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
182 config->cfg &= ~(BIT(1) | BIT(2));
184 /* Disable data value and data address tracing */
185 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
186 ETM_MODE_DATA_TRACE_VAL);
187 config->cfg &= ~(BIT(16) | BIT(17));
189 /* Disable all events tracing */
190 config->eventctrl0 = 0x0;
191 config->eventctrl1 = 0x0;
193 /* Disable timestamp event */
194 config->ts_ctrl = 0x0;
196 /* Disable stalling */
197 config->stall_ctrl = 0x0;
199 /* Reset trace synchronization period to 2^8 = 256 bytes*/
200 if (drvdata->syncpr == false)
201 config->syncfreq = 0x8;
204 * Enable ViewInst to trace everything with start-stop logic in
205 * started state. ARM recommends start-stop logic is set before
208 config->vinst_ctrl |= BIT(0);
209 if (drvdata->nr_addr_cmp == true) {
210 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
211 /* SSSTATUS, bit[9] */
212 config->vinst_ctrl |= BIT(9);
215 /* No address range filtering for ViewInst */
216 config->viiectlr = 0x0;
218 /* No start-stop filtering for ViewInst */
219 config->vissctlr = 0x0;
221 /* Disable seq events */
222 for (i = 0; i < drvdata->nrseqstate-1; i++)
223 config->seq_ctrl[i] = 0x0;
224 config->seq_rst = 0x0;
225 config->seq_state = 0x0;
227 /* Disable external input events */
228 config->ext_inp = 0x0;
230 config->cntr_idx = 0x0;
231 for (i = 0; i < drvdata->nr_cntr; i++) {
232 config->cntrldvr[i] = 0x0;
233 config->cntr_ctrl[i] = 0x0;
234 config->cntr_val[i] = 0x0;
237 config->res_idx = 0x0;
238 for (i = 0; i < drvdata->nr_resource; i++)
239 config->res_ctrl[i] = 0x0;
241 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
242 config->ss_ctrl[i] = 0x0;
243 config->ss_pe_cmp[i] = 0x0;
246 config->addr_idx = 0x0;
247 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
248 config->addr_val[i] = 0x0;
249 config->addr_acc[i] = 0x0;
250 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
253 config->ctxid_idx = 0x0;
254 for (i = 0; i < drvdata->numcidc; i++)
255 config->ctxid_pid[i] = 0x0;
257 config->ctxid_mask0 = 0x0;
258 config->ctxid_mask1 = 0x0;
260 config->vmid_idx = 0x0;
261 for (i = 0; i < drvdata->numvmidc; i++)
262 config->vmid_val[i] = 0x0;
263 config->vmid_mask0 = 0x0;
264 config->vmid_mask1 = 0x0;
266 drvdata->trcid = drvdata->cpu + 1;
268 spin_unlock(&drvdata->spinlock);
272 static DEVICE_ATTR_WO(reset);
274 static ssize_t mode_show(struct device *dev,
275 struct device_attribute *attr,
279 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
280 struct etmv4_config *config = &drvdata->config;
283 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
286 static ssize_t mode_store(struct device *dev,
287 struct device_attribute *attr,
288 const char *buf, size_t size)
290 unsigned long val, mode;
291 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
292 struct etmv4_config *config = &drvdata->config;
294 if (kstrtoul(buf, 16, &val))
297 spin_lock(&drvdata->spinlock);
298 config->mode = val & ETMv4_MODE_ALL;
299 etm4_set_mode_exclude(drvdata,
300 config->mode & ETM_MODE_EXCLUDE ? true : false);
302 if (drvdata->instrp0 == true) {
303 /* start by clearing instruction P0 field */
304 config->cfg &= ~(BIT(1) | BIT(2));
305 if (config->mode & ETM_MODE_LOAD)
306 /* 0b01 Trace load instructions as P0 instructions */
307 config->cfg |= BIT(1);
308 if (config->mode & ETM_MODE_STORE)
309 /* 0b10 Trace store instructions as P0 instructions */
310 config->cfg |= BIT(2);
311 if (config->mode & ETM_MODE_LOAD_STORE)
313 * 0b11 Trace load and store instructions
316 config->cfg |= BIT(1) | BIT(2);
319 /* bit[3], Branch broadcast mode */
320 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
321 config->cfg |= BIT(3);
323 config->cfg &= ~BIT(3);
325 /* bit[4], Cycle counting instruction trace bit */
326 if ((config->mode & ETMv4_MODE_CYCACC) &&
327 (drvdata->trccci == true))
328 config->cfg |= BIT(4);
330 config->cfg &= ~BIT(4);
332 /* bit[6], Context ID tracing bit */
333 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
334 config->cfg |= BIT(6);
336 config->cfg &= ~BIT(6);
338 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
339 config->cfg |= BIT(7);
341 config->cfg &= ~BIT(7);
343 /* bits[10:8], Conditional instruction tracing bit */
344 mode = ETM_MODE_COND(config->mode);
345 if (drvdata->trccond == true) {
346 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
347 config->cfg |= mode << 8;
350 /* bit[11], Global timestamp tracing bit */
351 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
352 config->cfg |= BIT(11);
354 config->cfg &= ~BIT(11);
356 /* bit[12], Return stack enable bit */
357 if ((config->mode & ETM_MODE_RETURNSTACK) &&
358 (drvdata->retstack == true))
359 config->cfg |= BIT(12);
361 config->cfg &= ~BIT(12);
363 /* bits[14:13], Q element enable field */
364 mode = ETM_MODE_QELEM(config->mode);
365 /* start by clearing QE bits */
366 config->cfg &= ~(BIT(13) | BIT(14));
367 /* if supported, Q elements with instruction counts are enabled */
368 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
369 config->cfg |= BIT(13);
371 * if supported, Q elements with and without instruction
374 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
375 config->cfg |= BIT(14);
377 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
378 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
379 (drvdata->atbtrig == true))
380 config->eventctrl1 |= BIT(11);
382 config->eventctrl1 &= ~BIT(11);
384 /* bit[12], Low-power state behavior override bit */
385 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
386 (drvdata->lpoverride == true))
387 config->eventctrl1 |= BIT(12);
389 config->eventctrl1 &= ~BIT(12);
391 /* bit[8], Instruction stall bit */
392 if (config->mode & ETM_MODE_ISTALL_EN)
393 config->stall_ctrl |= BIT(8);
395 config->stall_ctrl &= ~BIT(8);
397 /* bit[10], Prioritize instruction trace bit */
398 if (config->mode & ETM_MODE_INSTPRIO)
399 config->stall_ctrl |= BIT(10);
401 config->stall_ctrl &= ~BIT(10);
403 /* bit[13], Trace overflow prevention bit */
404 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
405 (drvdata->nooverflow == true))
406 config->stall_ctrl |= BIT(13);
408 config->stall_ctrl &= ~BIT(13);
410 /* bit[9] Start/stop logic control bit */
411 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
412 config->vinst_ctrl |= BIT(9);
414 config->vinst_ctrl &= ~BIT(9);
416 /* bit[10], Whether a trace unit must trace a Reset exception */
417 if (config->mode & ETM_MODE_TRACE_RESET)
418 config->vinst_ctrl |= BIT(10);
420 config->vinst_ctrl &= ~BIT(10);
422 /* bit[11], Whether a trace unit must trace a system error exception */
423 if ((config->mode & ETM_MODE_TRACE_ERR) &&
424 (drvdata->trc_error == true))
425 config->vinst_ctrl |= BIT(11);
427 config->vinst_ctrl &= ~BIT(11);
429 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
430 etm4_config_trace_mode(config);
432 spin_unlock(&drvdata->spinlock);
436 static DEVICE_ATTR_RW(mode);
438 static ssize_t pe_show(struct device *dev,
439 struct device_attribute *attr,
443 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
444 struct etmv4_config *config = &drvdata->config;
446 val = config->pe_sel;
447 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
450 static ssize_t pe_store(struct device *dev,
451 struct device_attribute *attr,
452 const char *buf, size_t size)
455 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
456 struct etmv4_config *config = &drvdata->config;
458 if (kstrtoul(buf, 16, &val))
461 spin_lock(&drvdata->spinlock);
462 if (val > drvdata->nr_pe) {
463 spin_unlock(&drvdata->spinlock);
467 config->pe_sel = val;
468 spin_unlock(&drvdata->spinlock);
471 static DEVICE_ATTR_RW(pe);
473 static ssize_t event_show(struct device *dev,
474 struct device_attribute *attr,
478 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
479 struct etmv4_config *config = &drvdata->config;
481 val = config->eventctrl0;
482 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
485 static ssize_t event_store(struct device *dev,
486 struct device_attribute *attr,
487 const char *buf, size_t size)
490 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
491 struct etmv4_config *config = &drvdata->config;
493 if (kstrtoul(buf, 16, &val))
496 spin_lock(&drvdata->spinlock);
497 switch (drvdata->nr_event) {
499 /* EVENT0, bits[7:0] */
500 config->eventctrl0 = val & 0xFF;
503 /* EVENT1, bits[15:8] */
504 config->eventctrl0 = val & 0xFFFF;
507 /* EVENT2, bits[23:16] */
508 config->eventctrl0 = val & 0xFFFFFF;
511 /* EVENT3, bits[31:24] */
512 config->eventctrl0 = val;
517 spin_unlock(&drvdata->spinlock);
520 static DEVICE_ATTR_RW(event);
522 static ssize_t event_instren_show(struct device *dev,
523 struct device_attribute *attr,
527 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
528 struct etmv4_config *config = &drvdata->config;
530 val = BMVAL(config->eventctrl1, 0, 3);
531 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
534 static ssize_t event_instren_store(struct device *dev,
535 struct device_attribute *attr,
536 const char *buf, size_t size)
539 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
540 struct etmv4_config *config = &drvdata->config;
542 if (kstrtoul(buf, 16, &val))
545 spin_lock(&drvdata->spinlock);
546 /* start by clearing all instruction event enable bits */
547 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
548 switch (drvdata->nr_event) {
550 /* generate Event element for event 1 */
551 config->eventctrl1 |= val & BIT(1);
554 /* generate Event element for event 1 and 2 */
555 config->eventctrl1 |= val & (BIT(0) | BIT(1));
558 /* generate Event element for event 1, 2 and 3 */
559 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
562 /* generate Event element for all 4 events */
563 config->eventctrl1 |= val & 0xF;
568 spin_unlock(&drvdata->spinlock);
571 static DEVICE_ATTR_RW(event_instren);
573 static ssize_t event_ts_show(struct device *dev,
574 struct device_attribute *attr,
578 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
579 struct etmv4_config *config = &drvdata->config;
581 val = config->ts_ctrl;
582 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
585 static ssize_t event_ts_store(struct device *dev,
586 struct device_attribute *attr,
587 const char *buf, size_t size)
590 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
591 struct etmv4_config *config = &drvdata->config;
593 if (kstrtoul(buf, 16, &val))
595 if (!drvdata->ts_size)
598 config->ts_ctrl = val & ETMv4_EVENT_MASK;
601 static DEVICE_ATTR_RW(event_ts);
603 static ssize_t syncfreq_show(struct device *dev,
604 struct device_attribute *attr,
608 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
609 struct etmv4_config *config = &drvdata->config;
611 val = config->syncfreq;
612 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
615 static ssize_t syncfreq_store(struct device *dev,
616 struct device_attribute *attr,
617 const char *buf, size_t size)
620 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
621 struct etmv4_config *config = &drvdata->config;
623 if (kstrtoul(buf, 16, &val))
625 if (drvdata->syncpr == true)
628 config->syncfreq = val & ETMv4_SYNC_MASK;
631 static DEVICE_ATTR_RW(syncfreq);
633 static ssize_t cyc_threshold_show(struct device *dev,
634 struct device_attribute *attr,
638 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
639 struct etmv4_config *config = &drvdata->config;
641 val = config->ccctlr;
642 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
645 static ssize_t cyc_threshold_store(struct device *dev,
646 struct device_attribute *attr,
647 const char *buf, size_t size)
650 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
651 struct etmv4_config *config = &drvdata->config;
653 if (kstrtoul(buf, 16, &val))
655 if (val < drvdata->ccitmin)
658 config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
661 static DEVICE_ATTR_RW(cyc_threshold);
663 static ssize_t bb_ctrl_show(struct device *dev,
664 struct device_attribute *attr,
668 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
669 struct etmv4_config *config = &drvdata->config;
671 val = config->bb_ctrl;
672 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
675 static ssize_t bb_ctrl_store(struct device *dev,
676 struct device_attribute *attr,
677 const char *buf, size_t size)
680 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
681 struct etmv4_config *config = &drvdata->config;
683 if (kstrtoul(buf, 16, &val))
685 if (drvdata->trcbb == false)
687 if (!drvdata->nr_addr_cmp)
690 * Bit[7:0] selects which address range comparator is used for
691 * branch broadcast control.
693 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
696 config->bb_ctrl = val;
699 static DEVICE_ATTR_RW(bb_ctrl);
701 static ssize_t event_vinst_show(struct device *dev,
702 struct device_attribute *attr,
706 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
707 struct etmv4_config *config = &drvdata->config;
709 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
710 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
713 static ssize_t event_vinst_store(struct device *dev,
714 struct device_attribute *attr,
715 const char *buf, size_t size)
718 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
719 struct etmv4_config *config = &drvdata->config;
721 if (kstrtoul(buf, 16, &val))
724 spin_lock(&drvdata->spinlock);
725 val &= ETMv4_EVENT_MASK;
726 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
727 config->vinst_ctrl |= val;
728 spin_unlock(&drvdata->spinlock);
731 static DEVICE_ATTR_RW(event_vinst);
733 static ssize_t s_exlevel_vinst_show(struct device *dev,
734 struct device_attribute *attr,
738 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
739 struct etmv4_config *config = &drvdata->config;
741 val = BMVAL(config->vinst_ctrl, 16, 19);
742 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
745 static ssize_t s_exlevel_vinst_store(struct device *dev,
746 struct device_attribute *attr,
747 const char *buf, size_t size)
750 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
751 struct etmv4_config *config = &drvdata->config;
753 if (kstrtoul(buf, 16, &val))
756 spin_lock(&drvdata->spinlock);
757 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
758 config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
759 /* enable instruction tracing for corresponding exception level */
760 val &= drvdata->s_ex_level;
761 config->vinst_ctrl |= (val << 16);
762 spin_unlock(&drvdata->spinlock);
765 static DEVICE_ATTR_RW(s_exlevel_vinst);
767 static ssize_t ns_exlevel_vinst_show(struct device *dev,
768 struct device_attribute *attr,
772 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
773 struct etmv4_config *config = &drvdata->config;
775 /* EXLEVEL_NS, bits[23:20] */
776 val = BMVAL(config->vinst_ctrl, 20, 23);
777 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
780 static ssize_t ns_exlevel_vinst_store(struct device *dev,
781 struct device_attribute *attr,
782 const char *buf, size_t size)
785 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
786 struct etmv4_config *config = &drvdata->config;
788 if (kstrtoul(buf, 16, &val))
791 spin_lock(&drvdata->spinlock);
792 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
793 config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
794 /* enable instruction tracing for corresponding exception level */
795 val &= drvdata->ns_ex_level;
796 config->vinst_ctrl |= (val << 20);
797 spin_unlock(&drvdata->spinlock);
800 static DEVICE_ATTR_RW(ns_exlevel_vinst);
802 static ssize_t addr_idx_show(struct device *dev,
803 struct device_attribute *attr,
807 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
808 struct etmv4_config *config = &drvdata->config;
810 val = config->addr_idx;
811 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
814 static ssize_t addr_idx_store(struct device *dev,
815 struct device_attribute *attr,
816 const char *buf, size_t size)
819 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
820 struct etmv4_config *config = &drvdata->config;
822 if (kstrtoul(buf, 16, &val))
824 if (val >= drvdata->nr_addr_cmp * 2)
828 * Use spinlock to ensure index doesn't change while it gets
829 * dereferenced multiple times within a spinlock block elsewhere.
831 spin_lock(&drvdata->spinlock);
832 config->addr_idx = val;
833 spin_unlock(&drvdata->spinlock);
836 static DEVICE_ATTR_RW(addr_idx);
838 static ssize_t addr_instdatatype_show(struct device *dev,
839 struct device_attribute *attr,
844 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
845 struct etmv4_config *config = &drvdata->config;
847 spin_lock(&drvdata->spinlock);
848 idx = config->addr_idx;
849 val = BMVAL(config->addr_acc[idx], 0, 1);
850 len = scnprintf(buf, PAGE_SIZE, "%s\n",
851 val == ETM_INSTR_ADDR ? "instr" :
852 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
853 (val == ETM_DATA_STORE_ADDR ? "data_store" :
854 "data_load_store")));
855 spin_unlock(&drvdata->spinlock);
859 static ssize_t addr_instdatatype_store(struct device *dev,
860 struct device_attribute *attr,
861 const char *buf, size_t size)
865 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
866 struct etmv4_config *config = &drvdata->config;
868 if (strlen(buf) >= 20)
870 if (sscanf(buf, "%s", str) != 1)
873 spin_lock(&drvdata->spinlock);
874 idx = config->addr_idx;
875 if (!strcmp(str, "instr"))
876 /* TYPE, bits[1:0] */
877 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
879 spin_unlock(&drvdata->spinlock);
882 static DEVICE_ATTR_RW(addr_instdatatype);
884 static ssize_t addr_single_show(struct device *dev,
885 struct device_attribute *attr,
890 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
891 struct etmv4_config *config = &drvdata->config;
893 idx = config->addr_idx;
894 spin_lock(&drvdata->spinlock);
895 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
896 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
897 spin_unlock(&drvdata->spinlock);
900 val = (unsigned long)config->addr_val[idx];
901 spin_unlock(&drvdata->spinlock);
902 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
905 static ssize_t addr_single_store(struct device *dev,
906 struct device_attribute *attr,
907 const char *buf, size_t size)
911 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
912 struct etmv4_config *config = &drvdata->config;
914 if (kstrtoul(buf, 16, &val))
917 spin_lock(&drvdata->spinlock);
918 idx = config->addr_idx;
919 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
920 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
921 spin_unlock(&drvdata->spinlock);
925 config->addr_val[idx] = (u64)val;
926 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
927 spin_unlock(&drvdata->spinlock);
930 static DEVICE_ATTR_RW(addr_single);
932 static ssize_t addr_range_show(struct device *dev,
933 struct device_attribute *attr,
937 unsigned long val1, val2;
938 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
939 struct etmv4_config *config = &drvdata->config;
941 spin_lock(&drvdata->spinlock);
942 idx = config->addr_idx;
944 spin_unlock(&drvdata->spinlock);
947 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
948 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
949 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
950 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
951 spin_unlock(&drvdata->spinlock);
955 val1 = (unsigned long)config->addr_val[idx];
956 val2 = (unsigned long)config->addr_val[idx + 1];
957 spin_unlock(&drvdata->spinlock);
958 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
961 static ssize_t addr_range_store(struct device *dev,
962 struct device_attribute *attr,
963 const char *buf, size_t size)
966 unsigned long val1, val2;
967 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
968 struct etmv4_config *config = &drvdata->config;
970 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
972 /* lower address comparator cannot have a higher address value */
976 spin_lock(&drvdata->spinlock);
977 idx = config->addr_idx;
979 spin_unlock(&drvdata->spinlock);
983 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
984 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
985 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
986 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
987 spin_unlock(&drvdata->spinlock);
991 config->addr_val[idx] = (u64)val1;
992 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
993 config->addr_val[idx + 1] = (u64)val2;
994 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
996 * Program include or exclude control bits for vinst or vdata
997 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
999 etm4_set_mode_exclude(drvdata,
1000 config->mode & ETM_MODE_EXCLUDE ? true : false);
1002 spin_unlock(&drvdata->spinlock);
1005 static DEVICE_ATTR_RW(addr_range);
1007 static ssize_t addr_start_show(struct device *dev,
1008 struct device_attribute *attr,
1013 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1014 struct etmv4_config *config = &drvdata->config;
1016 spin_lock(&drvdata->spinlock);
1017 idx = config->addr_idx;
1019 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1020 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1021 spin_unlock(&drvdata->spinlock);
1025 val = (unsigned long)config->addr_val[idx];
1026 spin_unlock(&drvdata->spinlock);
1027 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1030 static ssize_t addr_start_store(struct device *dev,
1031 struct device_attribute *attr,
1032 const char *buf, size_t size)
1036 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1037 struct etmv4_config *config = &drvdata->config;
1039 if (kstrtoul(buf, 16, &val))
1042 spin_lock(&drvdata->spinlock);
1043 idx = config->addr_idx;
1044 if (!drvdata->nr_addr_cmp) {
1045 spin_unlock(&drvdata->spinlock);
1048 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1049 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1050 spin_unlock(&drvdata->spinlock);
1054 config->addr_val[idx] = (u64)val;
1055 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1056 config->vissctlr |= BIT(idx);
1057 /* SSSTATUS, bit[9] - turn on start/stop logic */
1058 config->vinst_ctrl |= BIT(9);
1059 spin_unlock(&drvdata->spinlock);
1062 static DEVICE_ATTR_RW(addr_start);
1064 static ssize_t addr_stop_show(struct device *dev,
1065 struct device_attribute *attr,
1070 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1071 struct etmv4_config *config = &drvdata->config;
1073 spin_lock(&drvdata->spinlock);
1074 idx = config->addr_idx;
1076 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1077 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1078 spin_unlock(&drvdata->spinlock);
1082 val = (unsigned long)config->addr_val[idx];
1083 spin_unlock(&drvdata->spinlock);
1084 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1087 static ssize_t addr_stop_store(struct device *dev,
1088 struct device_attribute *attr,
1089 const char *buf, size_t size)
1093 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1094 struct etmv4_config *config = &drvdata->config;
1096 if (kstrtoul(buf, 16, &val))
1099 spin_lock(&drvdata->spinlock);
1100 idx = config->addr_idx;
1101 if (!drvdata->nr_addr_cmp) {
1102 spin_unlock(&drvdata->spinlock);
1105 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1106 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1107 spin_unlock(&drvdata->spinlock);
1111 config->addr_val[idx] = (u64)val;
1112 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1113 config->vissctlr |= BIT(idx + 16);
1114 /* SSSTATUS, bit[9] - turn on start/stop logic */
1115 config->vinst_ctrl |= BIT(9);
1116 spin_unlock(&drvdata->spinlock);
1119 static DEVICE_ATTR_RW(addr_stop);
1121 static ssize_t addr_ctxtype_show(struct device *dev,
1122 struct device_attribute *attr,
1127 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1128 struct etmv4_config *config = &drvdata->config;
1130 spin_lock(&drvdata->spinlock);
1131 idx = config->addr_idx;
1132 /* CONTEXTTYPE, bits[3:2] */
1133 val = BMVAL(config->addr_acc[idx], 2, 3);
1134 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1135 (val == ETM_CTX_CTXID ? "ctxid" :
1136 (val == ETM_CTX_VMID ? "vmid" : "all")));
1137 spin_unlock(&drvdata->spinlock);
1141 static ssize_t addr_ctxtype_store(struct device *dev,
1142 struct device_attribute *attr,
1143 const char *buf, size_t size)
1147 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1148 struct etmv4_config *config = &drvdata->config;
1150 if (strlen(buf) >= 10)
1152 if (sscanf(buf, "%s", str) != 1)
1155 spin_lock(&drvdata->spinlock);
1156 idx = config->addr_idx;
1157 if (!strcmp(str, "none"))
1158 /* start by clearing context type bits */
1159 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1160 else if (!strcmp(str, "ctxid")) {
1161 /* 0b01 The trace unit performs a Context ID */
1162 if (drvdata->numcidc) {
1163 config->addr_acc[idx] |= BIT(2);
1164 config->addr_acc[idx] &= ~BIT(3);
1166 } else if (!strcmp(str, "vmid")) {
1167 /* 0b10 The trace unit performs a VMID */
1168 if (drvdata->numvmidc) {
1169 config->addr_acc[idx] &= ~BIT(2);
1170 config->addr_acc[idx] |= BIT(3);
1172 } else if (!strcmp(str, "all")) {
1174 * 0b11 The trace unit performs a Context ID
1175 * comparison and a VMID
1177 if (drvdata->numcidc)
1178 config->addr_acc[idx] |= BIT(2);
1179 if (drvdata->numvmidc)
1180 config->addr_acc[idx] |= BIT(3);
1182 spin_unlock(&drvdata->spinlock);
1185 static DEVICE_ATTR_RW(addr_ctxtype);
1187 static ssize_t addr_context_show(struct device *dev,
1188 struct device_attribute *attr,
1193 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1194 struct etmv4_config *config = &drvdata->config;
1196 spin_lock(&drvdata->spinlock);
1197 idx = config->addr_idx;
1198 /* context ID comparator bits[6:4] */
1199 val = BMVAL(config->addr_acc[idx], 4, 6);
1200 spin_unlock(&drvdata->spinlock);
1201 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1204 static ssize_t addr_context_store(struct device *dev,
1205 struct device_attribute *attr,
1206 const char *buf, size_t size)
1210 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1211 struct etmv4_config *config = &drvdata->config;
1213 if (kstrtoul(buf, 16, &val))
1215 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1217 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1218 drvdata->numcidc : drvdata->numvmidc))
1221 spin_lock(&drvdata->spinlock);
1222 idx = config->addr_idx;
1223 /* clear context ID comparator bits[6:4] */
1224 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1225 config->addr_acc[idx] |= (val << 4);
1226 spin_unlock(&drvdata->spinlock);
1229 static DEVICE_ATTR_RW(addr_context);
1231 static ssize_t seq_idx_show(struct device *dev,
1232 struct device_attribute *attr,
1236 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1237 struct etmv4_config *config = &drvdata->config;
1239 val = config->seq_idx;
1240 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1243 static ssize_t seq_idx_store(struct device *dev,
1244 struct device_attribute *attr,
1245 const char *buf, size_t size)
1248 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1249 struct etmv4_config *config = &drvdata->config;
1251 if (kstrtoul(buf, 16, &val))
1253 if (val >= drvdata->nrseqstate - 1)
1257 * Use spinlock to ensure index doesn't change while it gets
1258 * dereferenced multiple times within a spinlock block elsewhere.
1260 spin_lock(&drvdata->spinlock);
1261 config->seq_idx = val;
1262 spin_unlock(&drvdata->spinlock);
1265 static DEVICE_ATTR_RW(seq_idx);
1267 static ssize_t seq_state_show(struct device *dev,
1268 struct device_attribute *attr,
1272 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1273 struct etmv4_config *config = &drvdata->config;
1275 val = config->seq_state;
1276 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1279 static ssize_t seq_state_store(struct device *dev,
1280 struct device_attribute *attr,
1281 const char *buf, size_t size)
1284 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1285 struct etmv4_config *config = &drvdata->config;
1287 if (kstrtoul(buf, 16, &val))
1289 if (val >= drvdata->nrseqstate)
1292 config->seq_state = val;
1295 static DEVICE_ATTR_RW(seq_state);
1297 static ssize_t seq_event_show(struct device *dev,
1298 struct device_attribute *attr,
1303 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1304 struct etmv4_config *config = &drvdata->config;
1306 spin_lock(&drvdata->spinlock);
1307 idx = config->seq_idx;
1308 val = config->seq_ctrl[idx];
1309 spin_unlock(&drvdata->spinlock);
1310 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1313 static ssize_t seq_event_store(struct device *dev,
1314 struct device_attribute *attr,
1315 const char *buf, size_t size)
1319 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1320 struct etmv4_config *config = &drvdata->config;
1322 if (kstrtoul(buf, 16, &val))
1325 spin_lock(&drvdata->spinlock);
1326 idx = config->seq_idx;
1327 /* RST, bits[7:0] */
1328 config->seq_ctrl[idx] = val & 0xFF;
1329 spin_unlock(&drvdata->spinlock);
1332 static DEVICE_ATTR_RW(seq_event);
1334 static ssize_t seq_reset_event_show(struct device *dev,
1335 struct device_attribute *attr,
1339 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1340 struct etmv4_config *config = &drvdata->config;
1342 val = config->seq_rst;
1343 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1346 static ssize_t seq_reset_event_store(struct device *dev,
1347 struct device_attribute *attr,
1348 const char *buf, size_t size)
1351 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1352 struct etmv4_config *config = &drvdata->config;
1354 if (kstrtoul(buf, 16, &val))
1356 if (!(drvdata->nrseqstate))
1359 config->seq_rst = val & ETMv4_EVENT_MASK;
1362 static DEVICE_ATTR_RW(seq_reset_event);
1364 static ssize_t cntr_idx_show(struct device *dev,
1365 struct device_attribute *attr,
1369 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1370 struct etmv4_config *config = &drvdata->config;
1372 val = config->cntr_idx;
1373 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1376 static ssize_t cntr_idx_store(struct device *dev,
1377 struct device_attribute *attr,
1378 const char *buf, size_t size)
1381 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1382 struct etmv4_config *config = &drvdata->config;
1384 if (kstrtoul(buf, 16, &val))
1386 if (val >= drvdata->nr_cntr)
1390 * Use spinlock to ensure index doesn't change while it gets
1391 * dereferenced multiple times within a spinlock block elsewhere.
1393 spin_lock(&drvdata->spinlock);
1394 config->cntr_idx = val;
1395 spin_unlock(&drvdata->spinlock);
1398 static DEVICE_ATTR_RW(cntr_idx);
1400 static ssize_t cntrldvr_show(struct device *dev,
1401 struct device_attribute *attr,
1406 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1407 struct etmv4_config *config = &drvdata->config;
1409 spin_lock(&drvdata->spinlock);
1410 idx = config->cntr_idx;
1411 val = config->cntrldvr[idx];
1412 spin_unlock(&drvdata->spinlock);
1413 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1416 static ssize_t cntrldvr_store(struct device *dev,
1417 struct device_attribute *attr,
1418 const char *buf, size_t size)
1422 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1423 struct etmv4_config *config = &drvdata->config;
1425 if (kstrtoul(buf, 16, &val))
1427 if (val > ETM_CNTR_MAX_VAL)
1430 spin_lock(&drvdata->spinlock);
1431 idx = config->cntr_idx;
1432 config->cntrldvr[idx] = val;
1433 spin_unlock(&drvdata->spinlock);
1436 static DEVICE_ATTR_RW(cntrldvr);
1438 static ssize_t cntr_val_show(struct device *dev,
1439 struct device_attribute *attr,
1444 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1445 struct etmv4_config *config = &drvdata->config;
1447 spin_lock(&drvdata->spinlock);
1448 idx = config->cntr_idx;
1449 val = config->cntr_val[idx];
1450 spin_unlock(&drvdata->spinlock);
1451 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1454 static ssize_t cntr_val_store(struct device *dev,
1455 struct device_attribute *attr,
1456 const char *buf, size_t size)
1460 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1461 struct etmv4_config *config = &drvdata->config;
1463 if (kstrtoul(buf, 16, &val))
1465 if (val > ETM_CNTR_MAX_VAL)
1468 spin_lock(&drvdata->spinlock);
1469 idx = config->cntr_idx;
1470 config->cntr_val[idx] = val;
1471 spin_unlock(&drvdata->spinlock);
1474 static DEVICE_ATTR_RW(cntr_val);
1476 static ssize_t cntr_ctrl_show(struct device *dev,
1477 struct device_attribute *attr,
1482 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1483 struct etmv4_config *config = &drvdata->config;
1485 spin_lock(&drvdata->spinlock);
1486 idx = config->cntr_idx;
1487 val = config->cntr_ctrl[idx];
1488 spin_unlock(&drvdata->spinlock);
1489 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1492 static ssize_t cntr_ctrl_store(struct device *dev,
1493 struct device_attribute *attr,
1494 const char *buf, size_t size)
1498 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1499 struct etmv4_config *config = &drvdata->config;
1501 if (kstrtoul(buf, 16, &val))
1504 spin_lock(&drvdata->spinlock);
1505 idx = config->cntr_idx;
1506 config->cntr_ctrl[idx] = val;
1507 spin_unlock(&drvdata->spinlock);
1510 static DEVICE_ATTR_RW(cntr_ctrl);
1512 static ssize_t res_idx_show(struct device *dev,
1513 struct device_attribute *attr,
1517 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1518 struct etmv4_config *config = &drvdata->config;
1520 val = config->res_idx;
1521 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1524 static ssize_t res_idx_store(struct device *dev,
1525 struct device_attribute *attr,
1526 const char *buf, size_t size)
1529 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1530 struct etmv4_config *config = &drvdata->config;
1532 if (kstrtoul(buf, 16, &val))
1534 /* Resource selector pair 0 is always implemented and reserved */
1535 if ((val == 0) || (val >= drvdata->nr_resource))
1539 * Use spinlock to ensure index doesn't change while it gets
1540 * dereferenced multiple times within a spinlock block elsewhere.
1542 spin_lock(&drvdata->spinlock);
1543 config->res_idx = val;
1544 spin_unlock(&drvdata->spinlock);
1547 static DEVICE_ATTR_RW(res_idx);
1549 static ssize_t res_ctrl_show(struct device *dev,
1550 struct device_attribute *attr,
1555 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1556 struct etmv4_config *config = &drvdata->config;
1558 spin_lock(&drvdata->spinlock);
1559 idx = config->res_idx;
1560 val = config->res_ctrl[idx];
1561 spin_unlock(&drvdata->spinlock);
1562 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1565 static ssize_t res_ctrl_store(struct device *dev,
1566 struct device_attribute *attr,
1567 const char *buf, size_t size)
1571 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1572 struct etmv4_config *config = &drvdata->config;
1574 if (kstrtoul(buf, 16, &val))
1577 spin_lock(&drvdata->spinlock);
1578 idx = config->res_idx;
1579 /* For odd idx pair inversal bit is RES0 */
1581 /* PAIRINV, bit[21] */
1583 config->res_ctrl[idx] = val;
1584 spin_unlock(&drvdata->spinlock);
1587 static DEVICE_ATTR_RW(res_ctrl);
1589 static ssize_t ctxid_idx_show(struct device *dev,
1590 struct device_attribute *attr,
1594 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1595 struct etmv4_config *config = &drvdata->config;
1597 val = config->ctxid_idx;
1598 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1601 static ssize_t ctxid_idx_store(struct device *dev,
1602 struct device_attribute *attr,
1603 const char *buf, size_t size)
1606 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1607 struct etmv4_config *config = &drvdata->config;
1609 if (kstrtoul(buf, 16, &val))
1611 if (val >= drvdata->numcidc)
1615 * Use spinlock to ensure index doesn't change while it gets
1616 * dereferenced multiple times within a spinlock block elsewhere.
1618 spin_lock(&drvdata->spinlock);
1619 config->ctxid_idx = val;
1620 spin_unlock(&drvdata->spinlock);
1623 static DEVICE_ATTR_RW(ctxid_idx);
1625 static ssize_t ctxid_pid_show(struct device *dev,
1626 struct device_attribute *attr,
1631 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1632 struct etmv4_config *config = &drvdata->config;
1635 * Don't use contextID tracing if coming from a PID namespace. See
1636 * comment in ctxid_pid_store().
1638 if (task_active_pid_ns(current) != &init_pid_ns)
1641 spin_lock(&drvdata->spinlock);
1642 idx = config->ctxid_idx;
1643 val = (unsigned long)config->ctxid_pid[idx];
1644 spin_unlock(&drvdata->spinlock);
1645 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1648 static ssize_t ctxid_pid_store(struct device *dev,
1649 struct device_attribute *attr,
1650 const char *buf, size_t size)
1654 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1655 struct etmv4_config *config = &drvdata->config;
1658 * When contextID tracing is enabled the tracers will insert the
1659 * value found in the contextID register in the trace stream. But if
1660 * a process is in a namespace the PID of that process as seen from the
1661 * namespace won't be what the kernel sees, something that makes the
1662 * feature confusing and can potentially leak kernel only information.
1663 * As such refuse to use the feature if @current is not in the initial
1666 if (task_active_pid_ns(current) != &init_pid_ns)
1670 * only implemented when ctxid tracing is enabled, i.e. at least one
1671 * ctxid comparator is implemented and ctxid is greater than 0 bits
1674 if (!drvdata->ctxid_size || !drvdata->numcidc)
1676 if (kstrtoul(buf, 16, &pid))
1679 spin_lock(&drvdata->spinlock);
1680 idx = config->ctxid_idx;
1681 config->ctxid_pid[idx] = (u64)pid;
1682 spin_unlock(&drvdata->spinlock);
1685 static DEVICE_ATTR_RW(ctxid_pid);
1687 static ssize_t ctxid_masks_show(struct device *dev,
1688 struct device_attribute *attr,
1691 unsigned long val1, val2;
1692 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1693 struct etmv4_config *config = &drvdata->config;
1696 * Don't use contextID tracing if coming from a PID namespace. See
1697 * comment in ctxid_pid_store().
1699 if (task_active_pid_ns(current) != &init_pid_ns)
1702 spin_lock(&drvdata->spinlock);
1703 val1 = config->ctxid_mask0;
1704 val2 = config->ctxid_mask1;
1705 spin_unlock(&drvdata->spinlock);
1706 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1709 static ssize_t ctxid_masks_store(struct device *dev,
1710 struct device_attribute *attr,
1711 const char *buf, size_t size)
1714 unsigned long val1, val2, mask;
1715 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1716 struct etmv4_config *config = &drvdata->config;
1719 * Don't use contextID tracing if coming from a PID namespace. See
1720 * comment in ctxid_pid_store().
1722 if (task_active_pid_ns(current) != &init_pid_ns)
1726 * only implemented when ctxid tracing is enabled, i.e. at least one
1727 * ctxid comparator is implemented and ctxid is greater than 0 bits
1730 if (!drvdata->ctxid_size || !drvdata->numcidc)
1732 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1735 spin_lock(&drvdata->spinlock);
1737 * each byte[0..3] controls mask value applied to ctxid
1740 switch (drvdata->numcidc) {
1742 /* COMP0, bits[7:0] */
1743 config->ctxid_mask0 = val1 & 0xFF;
1746 /* COMP1, bits[15:8] */
1747 config->ctxid_mask0 = val1 & 0xFFFF;
1750 /* COMP2, bits[23:16] */
1751 config->ctxid_mask0 = val1 & 0xFFFFFF;
1754 /* COMP3, bits[31:24] */
1755 config->ctxid_mask0 = val1;
1758 /* COMP4, bits[7:0] */
1759 config->ctxid_mask0 = val1;
1760 config->ctxid_mask1 = val2 & 0xFF;
1763 /* COMP5, bits[15:8] */
1764 config->ctxid_mask0 = val1;
1765 config->ctxid_mask1 = val2 & 0xFFFF;
1768 /* COMP6, bits[23:16] */
1769 config->ctxid_mask0 = val1;
1770 config->ctxid_mask1 = val2 & 0xFFFFFF;
1773 /* COMP7, bits[31:24] */
1774 config->ctxid_mask0 = val1;
1775 config->ctxid_mask1 = val2;
1781 * If software sets a mask bit to 1, it must program relevant byte
1782 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
1783 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1784 * of ctxid comparator0 value (corresponding to byte 0) register.
1786 mask = config->ctxid_mask0;
1787 for (i = 0; i < drvdata->numcidc; i++) {
1788 /* mask value of corresponding ctxid comparator */
1789 maskbyte = mask & ETMv4_EVENT_MASK;
1791 * each bit corresponds to a byte of respective ctxid comparator
1794 for (j = 0; j < 8; j++) {
1796 config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
1799 /* Select the next ctxid comparator mask value */
1801 /* ctxid comparators[4-7] */
1802 mask = config->ctxid_mask1;
1807 spin_unlock(&drvdata->spinlock);
1810 static DEVICE_ATTR_RW(ctxid_masks);
1812 static ssize_t vmid_idx_show(struct device *dev,
1813 struct device_attribute *attr,
1817 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1818 struct etmv4_config *config = &drvdata->config;
1820 val = config->vmid_idx;
1821 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1824 static ssize_t vmid_idx_store(struct device *dev,
1825 struct device_attribute *attr,
1826 const char *buf, size_t size)
1829 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1830 struct etmv4_config *config = &drvdata->config;
1832 if (kstrtoul(buf, 16, &val))
1834 if (val >= drvdata->numvmidc)
1838 * Use spinlock to ensure index doesn't change while it gets
1839 * dereferenced multiple times within a spinlock block elsewhere.
1841 spin_lock(&drvdata->spinlock);
1842 config->vmid_idx = val;
1843 spin_unlock(&drvdata->spinlock);
1846 static DEVICE_ATTR_RW(vmid_idx);
1848 static ssize_t vmid_val_show(struct device *dev,
1849 struct device_attribute *attr,
1853 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1854 struct etmv4_config *config = &drvdata->config;
1856 val = (unsigned long)config->vmid_val[config->vmid_idx];
1857 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1860 static ssize_t vmid_val_store(struct device *dev,
1861 struct device_attribute *attr,
1862 const char *buf, size_t size)
1865 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1866 struct etmv4_config *config = &drvdata->config;
1869 * only implemented when vmid tracing is enabled, i.e. at least one
1870 * vmid comparator is implemented and at least 8 bit vmid size
1872 if (!drvdata->vmid_size || !drvdata->numvmidc)
1874 if (kstrtoul(buf, 16, &val))
1877 spin_lock(&drvdata->spinlock);
1878 config->vmid_val[config->vmid_idx] = (u64)val;
1879 spin_unlock(&drvdata->spinlock);
1882 static DEVICE_ATTR_RW(vmid_val);
1884 static ssize_t vmid_masks_show(struct device *dev,
1885 struct device_attribute *attr, char *buf)
1887 unsigned long val1, val2;
1888 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1889 struct etmv4_config *config = &drvdata->config;
1891 spin_lock(&drvdata->spinlock);
1892 val1 = config->vmid_mask0;
1893 val2 = config->vmid_mask1;
1894 spin_unlock(&drvdata->spinlock);
1895 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1898 static ssize_t vmid_masks_store(struct device *dev,
1899 struct device_attribute *attr,
1900 const char *buf, size_t size)
1903 unsigned long val1, val2, mask;
1904 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1905 struct etmv4_config *config = &drvdata->config;
1908 * only implemented when vmid tracing is enabled, i.e. at least one
1909 * vmid comparator is implemented and at least 8 bit vmid size
1911 if (!drvdata->vmid_size || !drvdata->numvmidc)
1913 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1916 spin_lock(&drvdata->spinlock);
1919 * each byte[0..3] controls mask value applied to vmid
1922 switch (drvdata->numvmidc) {
1924 /* COMP0, bits[7:0] */
1925 config->vmid_mask0 = val1 & 0xFF;
1928 /* COMP1, bits[15:8] */
1929 config->vmid_mask0 = val1 & 0xFFFF;
1932 /* COMP2, bits[23:16] */
1933 config->vmid_mask0 = val1 & 0xFFFFFF;
1936 /* COMP3, bits[31:24] */
1937 config->vmid_mask0 = val1;
1940 /* COMP4, bits[7:0] */
1941 config->vmid_mask0 = val1;
1942 config->vmid_mask1 = val2 & 0xFF;
1945 /* COMP5, bits[15:8] */
1946 config->vmid_mask0 = val1;
1947 config->vmid_mask1 = val2 & 0xFFFF;
1950 /* COMP6, bits[23:16] */
1951 config->vmid_mask0 = val1;
1952 config->vmid_mask1 = val2 & 0xFFFFFF;
1955 /* COMP7, bits[31:24] */
1956 config->vmid_mask0 = val1;
1957 config->vmid_mask1 = val2;
1964 * If software sets a mask bit to 1, it must program relevant byte
1965 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
1966 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
1967 * of vmid comparator0 value (corresponding to byte 0) register.
1969 mask = config->vmid_mask0;
1970 for (i = 0; i < drvdata->numvmidc; i++) {
1971 /* mask value of corresponding vmid comparator */
1972 maskbyte = mask & ETMv4_EVENT_MASK;
1974 * each bit corresponds to a byte of respective vmid comparator
1977 for (j = 0; j < 8; j++) {
1979 config->vmid_val[i] &= ~(0xFFUL << (j * 8));
1982 /* Select the next vmid comparator mask value */
1984 /* vmid comparators[4-7] */
1985 mask = config->vmid_mask1;
1989 spin_unlock(&drvdata->spinlock);
1992 static DEVICE_ATTR_RW(vmid_masks);
1994 static ssize_t cpu_show(struct device *dev,
1995 struct device_attribute *attr, char *buf)
1998 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2001 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2004 static DEVICE_ATTR_RO(cpu);
2006 static struct attribute *coresight_etmv4_attrs[] = {
2007 &dev_attr_nr_pe_cmp.attr,
2008 &dev_attr_nr_addr_cmp.attr,
2009 &dev_attr_nr_cntr.attr,
2010 &dev_attr_nr_ext_inp.attr,
2011 &dev_attr_numcidc.attr,
2012 &dev_attr_numvmidc.attr,
2013 &dev_attr_nrseqstate.attr,
2014 &dev_attr_nr_resource.attr,
2015 &dev_attr_nr_ss_cmp.attr,
2016 &dev_attr_reset.attr,
2017 &dev_attr_mode.attr,
2019 &dev_attr_event.attr,
2020 &dev_attr_event_instren.attr,
2021 &dev_attr_event_ts.attr,
2022 &dev_attr_syncfreq.attr,
2023 &dev_attr_cyc_threshold.attr,
2024 &dev_attr_bb_ctrl.attr,
2025 &dev_attr_event_vinst.attr,
2026 &dev_attr_s_exlevel_vinst.attr,
2027 &dev_attr_ns_exlevel_vinst.attr,
2028 &dev_attr_addr_idx.attr,
2029 &dev_attr_addr_instdatatype.attr,
2030 &dev_attr_addr_single.attr,
2031 &dev_attr_addr_range.attr,
2032 &dev_attr_addr_start.attr,
2033 &dev_attr_addr_stop.attr,
2034 &dev_attr_addr_ctxtype.attr,
2035 &dev_attr_addr_context.attr,
2036 &dev_attr_seq_idx.attr,
2037 &dev_attr_seq_state.attr,
2038 &dev_attr_seq_event.attr,
2039 &dev_attr_seq_reset_event.attr,
2040 &dev_attr_cntr_idx.attr,
2041 &dev_attr_cntrldvr.attr,
2042 &dev_attr_cntr_val.attr,
2043 &dev_attr_cntr_ctrl.attr,
2044 &dev_attr_res_idx.attr,
2045 &dev_attr_res_ctrl.attr,
2046 &dev_attr_ctxid_idx.attr,
2047 &dev_attr_ctxid_pid.attr,
2048 &dev_attr_ctxid_masks.attr,
2049 &dev_attr_vmid_idx.attr,
2050 &dev_attr_vmid_val.attr,
2051 &dev_attr_vmid_masks.attr,
2061 static void do_smp_cross_read(void *data)
2063 struct etmv4_reg *reg = data;
2065 reg->data = readl_relaxed(reg->addr);
2068 static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2070 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2071 struct etmv4_reg reg;
2073 reg.addr = drvdata->base + offset;
2075 * smp cross call ensures the CPU will be powered up before
2076 * accessing the ETMv4 trace core registers
2078 smp_call_function_single(drvdata->cpu, do_smp_cross_read, ®, 1);
2082 #define coresight_etm4x_reg(name, offset) \
2083 coresight_simple_reg32(struct etmv4_drvdata, name, offset)
2085 #define coresight_etm4x_cross_read(name, offset) \
2086 coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
2089 coresight_etm4x_reg(trcpdcr, TRCPDCR);
2090 coresight_etm4x_reg(trcpdsr, TRCPDSR);
2091 coresight_etm4x_reg(trclsr, TRCLSR);
2092 coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
2093 coresight_etm4x_reg(trcdevid, TRCDEVID);
2094 coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
2095 coresight_etm4x_reg(trcpidr0, TRCPIDR0);
2096 coresight_etm4x_reg(trcpidr1, TRCPIDR1);
2097 coresight_etm4x_reg(trcpidr2, TRCPIDR2);
2098 coresight_etm4x_reg(trcpidr3, TRCPIDR3);
2099 coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
2100 coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
2101 coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
2103 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2104 &dev_attr_trcoslsr.attr,
2105 &dev_attr_trcpdcr.attr,
2106 &dev_attr_trcpdsr.attr,
2107 &dev_attr_trclsr.attr,
2108 &dev_attr_trcconfig.attr,
2109 &dev_attr_trctraceid.attr,
2110 &dev_attr_trcauthstatus.attr,
2111 &dev_attr_trcdevid.attr,
2112 &dev_attr_trcdevtype.attr,
2113 &dev_attr_trcpidr0.attr,
2114 &dev_attr_trcpidr1.attr,
2115 &dev_attr_trcpidr2.attr,
2116 &dev_attr_trcpidr3.attr,
/* ID registers, read on the owning CPU via cross call. */
coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);
2134 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2135 &dev_attr_trcidr0.attr,
2136 &dev_attr_trcidr1.attr,
2137 &dev_attr_trcidr2.attr,
2138 &dev_attr_trcidr3.attr,
2139 &dev_attr_trcidr4.attr,
2140 &dev_attr_trcidr5.attr,
2141 /* trcidr[6,7] are reserved */
2142 &dev_attr_trcidr8.attr,
2143 &dev_attr_trcidr9.attr,
2144 &dev_attr_trcidr10.attr,
2145 &dev_attr_trcidr11.attr,
2146 &dev_attr_trcidr12.attr,
2147 &dev_attr_trcidr13.attr,
2151 static const struct attribute_group coresight_etmv4_group = {
2152 .attrs = coresight_etmv4_attrs,
2155 static const struct attribute_group coresight_etmv4_mgmt_group = {
2156 .attrs = coresight_etmv4_mgmt_attrs,
2160 static const struct attribute_group coresight_etmv4_trcidr_group = {
2161 .attrs = coresight_etmv4_trcidr_attrs,
2165 const struct attribute_group *coresight_etmv4_groups[] = {
2166 &coresight_etmv4_group,
2167 &coresight_etmv4_mgmt_group,
2168 &coresight_etmv4_trcidr_group,