// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
13 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
16 struct etmv4_config *config = &drvdata->config;
18 idx = config->addr_idx;
21 * TRCACATRn.TYPE bit[1:0]: type of comparison
22 * the trace unit performs
24 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
29 * We are performing instruction address comparison. Set the
30 * relevant bit of ViewInst Include/Exclude Control register
31 * for corresponding address comparator pair.
33 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
34 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
37 if (exclude == true) {
39 * Set exclude bit and unset the include bit
40 * corresponding to comparator pair
42 config->viiectlr |= BIT(idx / 2 + 16);
43 config->viiectlr &= ~BIT(idx / 2);
46 * Set include bit and unset exclude bit
47 * corresponding to comparator pair
49 config->viiectlr |= BIT(idx / 2);
50 config->viiectlr &= ~BIT(idx / 2 + 16);
56 static ssize_t nr_pe_cmp_show(struct device *dev,
57 struct device_attribute *attr,
61 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
63 val = drvdata->nr_pe_cmp;
64 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
66 static DEVICE_ATTR_RO(nr_pe_cmp);
68 static ssize_t nr_addr_cmp_show(struct device *dev,
69 struct device_attribute *attr,
73 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
75 val = drvdata->nr_addr_cmp;
76 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
78 static DEVICE_ATTR_RO(nr_addr_cmp);
80 static ssize_t nr_cntr_show(struct device *dev,
81 struct device_attribute *attr,
85 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
87 val = drvdata->nr_cntr;
88 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
90 static DEVICE_ATTR_RO(nr_cntr);
92 static ssize_t nr_ext_inp_show(struct device *dev,
93 struct device_attribute *attr,
97 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
99 val = drvdata->nr_ext_inp;
100 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
102 static DEVICE_ATTR_RO(nr_ext_inp);
104 static ssize_t numcidc_show(struct device *dev,
105 struct device_attribute *attr,
109 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
111 val = drvdata->numcidc;
112 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
114 static DEVICE_ATTR_RO(numcidc);
116 static ssize_t numvmidc_show(struct device *dev,
117 struct device_attribute *attr,
121 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
123 val = drvdata->numvmidc;
124 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
126 static DEVICE_ATTR_RO(numvmidc);
128 static ssize_t nrseqstate_show(struct device *dev,
129 struct device_attribute *attr,
133 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
135 val = drvdata->nrseqstate;
136 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
138 static DEVICE_ATTR_RO(nrseqstate);
140 static ssize_t nr_resource_show(struct device *dev,
141 struct device_attribute *attr,
145 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
147 val = drvdata->nr_resource;
148 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
150 static DEVICE_ATTR_RO(nr_resource);
152 static ssize_t nr_ss_cmp_show(struct device *dev,
153 struct device_attribute *attr,
157 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
159 val = drvdata->nr_ss_cmp;
160 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
162 static DEVICE_ATTR_RO(nr_ss_cmp);
164 static ssize_t reset_store(struct device *dev,
165 struct device_attribute *attr,
166 const char *buf, size_t size)
170 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
171 struct etmv4_config *config = &drvdata->config;
173 if (kstrtoul(buf, 16, &val))
176 spin_lock(&drvdata->spinlock);
180 /* Disable data tracing: do not trace load and store data transfers */
181 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
182 config->cfg &= ~(BIT(1) | BIT(2));
184 /* Disable data value and data address tracing */
185 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
186 ETM_MODE_DATA_TRACE_VAL);
187 config->cfg &= ~(BIT(16) | BIT(17));
189 /* Disable all events tracing */
190 config->eventctrl0 = 0x0;
191 config->eventctrl1 = 0x0;
193 /* Disable timestamp event */
194 config->ts_ctrl = 0x0;
196 /* Disable stalling */
197 config->stall_ctrl = 0x0;
199 /* Reset trace synchronization period to 2^8 = 256 bytes*/
200 if (drvdata->syncpr == false)
201 config->syncfreq = 0x8;
204 * Enable ViewInst to trace everything with start-stop logic in
205 * started state. ARM recommends start-stop logic is set before
208 config->vinst_ctrl |= BIT(0);
209 if (drvdata->nr_addr_cmp == true) {
210 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
211 /* SSSTATUS, bit[9] */
212 config->vinst_ctrl |= BIT(9);
215 /* No address range filtering for ViewInst */
216 config->viiectlr = 0x0;
218 /* No start-stop filtering for ViewInst */
219 config->vissctlr = 0x0;
221 /* Disable seq events */
222 for (i = 0; i < drvdata->nrseqstate-1; i++)
223 config->seq_ctrl[i] = 0x0;
224 config->seq_rst = 0x0;
225 config->seq_state = 0x0;
227 /* Disable external input events */
228 config->ext_inp = 0x0;
230 config->cntr_idx = 0x0;
231 for (i = 0; i < drvdata->nr_cntr; i++) {
232 config->cntrldvr[i] = 0x0;
233 config->cntr_ctrl[i] = 0x0;
234 config->cntr_val[i] = 0x0;
237 config->res_idx = 0x0;
238 for (i = 0; i < drvdata->nr_resource; i++)
239 config->res_ctrl[i] = 0x0;
241 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
242 config->ss_ctrl[i] = 0x0;
243 config->ss_pe_cmp[i] = 0x0;
246 config->addr_idx = 0x0;
247 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
248 config->addr_val[i] = 0x0;
249 config->addr_acc[i] = 0x0;
250 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
253 config->ctxid_idx = 0x0;
254 for (i = 0; i < drvdata->numcidc; i++)
255 config->ctxid_pid[i] = 0x0;
257 config->ctxid_mask0 = 0x0;
258 config->ctxid_mask1 = 0x0;
260 config->vmid_idx = 0x0;
261 for (i = 0; i < drvdata->numvmidc; i++)
262 config->vmid_val[i] = 0x0;
263 config->vmid_mask0 = 0x0;
264 config->vmid_mask1 = 0x0;
266 drvdata->trcid = drvdata->cpu + 1;
268 spin_unlock(&drvdata->spinlock);
272 static DEVICE_ATTR_WO(reset);
274 static ssize_t mode_show(struct device *dev,
275 struct device_attribute *attr,
279 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
280 struct etmv4_config *config = &drvdata->config;
283 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
286 static ssize_t mode_store(struct device *dev,
287 struct device_attribute *attr,
288 const char *buf, size_t size)
290 unsigned long val, mode;
291 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
292 struct etmv4_config *config = &drvdata->config;
294 if (kstrtoul(buf, 16, &val))
297 spin_lock(&drvdata->spinlock);
298 config->mode = val & ETMv4_MODE_ALL;
300 if (config->mode & ETM_MODE_EXCLUDE)
301 etm4_set_mode_exclude(drvdata, true);
303 etm4_set_mode_exclude(drvdata, false);
305 if (drvdata->instrp0 == true) {
306 /* start by clearing instruction P0 field */
307 config->cfg &= ~(BIT(1) | BIT(2));
308 if (config->mode & ETM_MODE_LOAD)
309 /* 0b01 Trace load instructions as P0 instructions */
310 config->cfg |= BIT(1);
311 if (config->mode & ETM_MODE_STORE)
312 /* 0b10 Trace store instructions as P0 instructions */
313 config->cfg |= BIT(2);
314 if (config->mode & ETM_MODE_LOAD_STORE)
316 * 0b11 Trace load and store instructions
319 config->cfg |= BIT(1) | BIT(2);
322 /* bit[3], Branch broadcast mode */
323 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
324 config->cfg |= BIT(3);
326 config->cfg &= ~BIT(3);
328 /* bit[4], Cycle counting instruction trace bit */
329 if ((config->mode & ETMv4_MODE_CYCACC) &&
330 (drvdata->trccci == true))
331 config->cfg |= BIT(4);
333 config->cfg &= ~BIT(4);
335 /* bit[6], Context ID tracing bit */
336 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
337 config->cfg |= BIT(6);
339 config->cfg &= ~BIT(6);
341 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
342 config->cfg |= BIT(7);
344 config->cfg &= ~BIT(7);
346 /* bits[10:8], Conditional instruction tracing bit */
347 mode = ETM_MODE_COND(config->mode);
348 if (drvdata->trccond == true) {
349 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
350 config->cfg |= mode << 8;
353 /* bit[11], Global timestamp tracing bit */
354 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
355 config->cfg |= BIT(11);
357 config->cfg &= ~BIT(11);
359 /* bit[12], Return stack enable bit */
360 if ((config->mode & ETM_MODE_RETURNSTACK) &&
361 (drvdata->retstack == true))
362 config->cfg |= BIT(12);
364 config->cfg &= ~BIT(12);
366 /* bits[14:13], Q element enable field */
367 mode = ETM_MODE_QELEM(config->mode);
368 /* start by clearing QE bits */
369 config->cfg &= ~(BIT(13) | BIT(14));
370 /* if supported, Q elements with instruction counts are enabled */
371 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
372 config->cfg |= BIT(13);
374 * if supported, Q elements with and without instruction
377 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
378 config->cfg |= BIT(14);
380 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
381 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
382 (drvdata->atbtrig == true))
383 config->eventctrl1 |= BIT(11);
385 config->eventctrl1 &= ~BIT(11);
387 /* bit[12], Low-power state behavior override bit */
388 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
389 (drvdata->lpoverride == true))
390 config->eventctrl1 |= BIT(12);
392 config->eventctrl1 &= ~BIT(12);
394 /* bit[8], Instruction stall bit */
395 if (config->mode & ETM_MODE_ISTALL_EN)
396 config->stall_ctrl |= BIT(8);
398 config->stall_ctrl &= ~BIT(8);
400 /* bit[10], Prioritize instruction trace bit */
401 if (config->mode & ETM_MODE_INSTPRIO)
402 config->stall_ctrl |= BIT(10);
404 config->stall_ctrl &= ~BIT(10);
406 /* bit[13], Trace overflow prevention bit */
407 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
408 (drvdata->nooverflow == true))
409 config->stall_ctrl |= BIT(13);
411 config->stall_ctrl &= ~BIT(13);
413 /* bit[9] Start/stop logic control bit */
414 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
415 config->vinst_ctrl |= BIT(9);
417 config->vinst_ctrl &= ~BIT(9);
419 /* bit[10], Whether a trace unit must trace a Reset exception */
420 if (config->mode & ETM_MODE_TRACE_RESET)
421 config->vinst_ctrl |= BIT(10);
423 config->vinst_ctrl &= ~BIT(10);
425 /* bit[11], Whether a trace unit must trace a system error exception */
426 if ((config->mode & ETM_MODE_TRACE_ERR) &&
427 (drvdata->trc_error == true))
428 config->vinst_ctrl |= BIT(11);
430 config->vinst_ctrl &= ~BIT(11);
432 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
433 etm4_config_trace_mode(config);
435 spin_unlock(&drvdata->spinlock);
439 static DEVICE_ATTR_RW(mode);
441 static ssize_t pe_show(struct device *dev,
442 struct device_attribute *attr,
446 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
447 struct etmv4_config *config = &drvdata->config;
449 val = config->pe_sel;
450 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
453 static ssize_t pe_store(struct device *dev,
454 struct device_attribute *attr,
455 const char *buf, size_t size)
458 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
459 struct etmv4_config *config = &drvdata->config;
461 if (kstrtoul(buf, 16, &val))
464 spin_lock(&drvdata->spinlock);
465 if (val > drvdata->nr_pe) {
466 spin_unlock(&drvdata->spinlock);
470 config->pe_sel = val;
471 spin_unlock(&drvdata->spinlock);
474 static DEVICE_ATTR_RW(pe);
476 static ssize_t event_show(struct device *dev,
477 struct device_attribute *attr,
481 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
482 struct etmv4_config *config = &drvdata->config;
484 val = config->eventctrl0;
485 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
488 static ssize_t event_store(struct device *dev,
489 struct device_attribute *attr,
490 const char *buf, size_t size)
493 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
494 struct etmv4_config *config = &drvdata->config;
496 if (kstrtoul(buf, 16, &val))
499 spin_lock(&drvdata->spinlock);
500 switch (drvdata->nr_event) {
502 /* EVENT0, bits[7:0] */
503 config->eventctrl0 = val & 0xFF;
506 /* EVENT1, bits[15:8] */
507 config->eventctrl0 = val & 0xFFFF;
510 /* EVENT2, bits[23:16] */
511 config->eventctrl0 = val & 0xFFFFFF;
514 /* EVENT3, bits[31:24] */
515 config->eventctrl0 = val;
520 spin_unlock(&drvdata->spinlock);
523 static DEVICE_ATTR_RW(event);
525 static ssize_t event_instren_show(struct device *dev,
526 struct device_attribute *attr,
530 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
531 struct etmv4_config *config = &drvdata->config;
533 val = BMVAL(config->eventctrl1, 0, 3);
534 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
537 static ssize_t event_instren_store(struct device *dev,
538 struct device_attribute *attr,
539 const char *buf, size_t size)
542 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
543 struct etmv4_config *config = &drvdata->config;
545 if (kstrtoul(buf, 16, &val))
548 spin_lock(&drvdata->spinlock);
549 /* start by clearing all instruction event enable bits */
550 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
551 switch (drvdata->nr_event) {
553 /* generate Event element for event 1 */
554 config->eventctrl1 |= val & BIT(1);
557 /* generate Event element for event 1 and 2 */
558 config->eventctrl1 |= val & (BIT(0) | BIT(1));
561 /* generate Event element for event 1, 2 and 3 */
562 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
565 /* generate Event element for all 4 events */
566 config->eventctrl1 |= val & 0xF;
571 spin_unlock(&drvdata->spinlock);
574 static DEVICE_ATTR_RW(event_instren);
576 static ssize_t event_ts_show(struct device *dev,
577 struct device_attribute *attr,
581 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
582 struct etmv4_config *config = &drvdata->config;
584 val = config->ts_ctrl;
585 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
588 static ssize_t event_ts_store(struct device *dev,
589 struct device_attribute *attr,
590 const char *buf, size_t size)
593 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
594 struct etmv4_config *config = &drvdata->config;
596 if (kstrtoul(buf, 16, &val))
598 if (!drvdata->ts_size)
601 config->ts_ctrl = val & ETMv4_EVENT_MASK;
604 static DEVICE_ATTR_RW(event_ts);
606 static ssize_t syncfreq_show(struct device *dev,
607 struct device_attribute *attr,
611 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
612 struct etmv4_config *config = &drvdata->config;
614 val = config->syncfreq;
615 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
618 static ssize_t syncfreq_store(struct device *dev,
619 struct device_attribute *attr,
620 const char *buf, size_t size)
623 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
624 struct etmv4_config *config = &drvdata->config;
626 if (kstrtoul(buf, 16, &val))
628 if (drvdata->syncpr == true)
631 config->syncfreq = val & ETMv4_SYNC_MASK;
634 static DEVICE_ATTR_RW(syncfreq);
636 static ssize_t cyc_threshold_show(struct device *dev,
637 struct device_attribute *attr,
641 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
642 struct etmv4_config *config = &drvdata->config;
644 val = config->ccctlr;
645 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
648 static ssize_t cyc_threshold_store(struct device *dev,
649 struct device_attribute *attr,
650 const char *buf, size_t size)
653 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
654 struct etmv4_config *config = &drvdata->config;
656 if (kstrtoul(buf, 16, &val))
658 if (val < drvdata->ccitmin)
661 config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
664 static DEVICE_ATTR_RW(cyc_threshold);
666 static ssize_t bb_ctrl_show(struct device *dev,
667 struct device_attribute *attr,
671 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
672 struct etmv4_config *config = &drvdata->config;
674 val = config->bb_ctrl;
675 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
678 static ssize_t bb_ctrl_store(struct device *dev,
679 struct device_attribute *attr,
680 const char *buf, size_t size)
683 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
684 struct etmv4_config *config = &drvdata->config;
686 if (kstrtoul(buf, 16, &val))
688 if (drvdata->trcbb == false)
690 if (!drvdata->nr_addr_cmp)
693 * Bit[7:0] selects which address range comparator is used for
694 * branch broadcast control.
696 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
699 config->bb_ctrl = val;
702 static DEVICE_ATTR_RW(bb_ctrl);
704 static ssize_t event_vinst_show(struct device *dev,
705 struct device_attribute *attr,
709 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
710 struct etmv4_config *config = &drvdata->config;
712 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
713 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
716 static ssize_t event_vinst_store(struct device *dev,
717 struct device_attribute *attr,
718 const char *buf, size_t size)
721 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
722 struct etmv4_config *config = &drvdata->config;
724 if (kstrtoul(buf, 16, &val))
727 spin_lock(&drvdata->spinlock);
728 val &= ETMv4_EVENT_MASK;
729 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
730 config->vinst_ctrl |= val;
731 spin_unlock(&drvdata->spinlock);
734 static DEVICE_ATTR_RW(event_vinst);
736 static ssize_t s_exlevel_vinst_show(struct device *dev,
737 struct device_attribute *attr,
741 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
742 struct etmv4_config *config = &drvdata->config;
744 val = BMVAL(config->vinst_ctrl, 16, 19);
745 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
748 static ssize_t s_exlevel_vinst_store(struct device *dev,
749 struct device_attribute *attr,
750 const char *buf, size_t size)
753 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
754 struct etmv4_config *config = &drvdata->config;
756 if (kstrtoul(buf, 16, &val))
759 spin_lock(&drvdata->spinlock);
760 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
761 config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
762 /* enable instruction tracing for corresponding exception level */
763 val &= drvdata->s_ex_level;
764 config->vinst_ctrl |= (val << 16);
765 spin_unlock(&drvdata->spinlock);
768 static DEVICE_ATTR_RW(s_exlevel_vinst);
770 static ssize_t ns_exlevel_vinst_show(struct device *dev,
771 struct device_attribute *attr,
775 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
776 struct etmv4_config *config = &drvdata->config;
778 /* EXLEVEL_NS, bits[23:20] */
779 val = BMVAL(config->vinst_ctrl, 20, 23);
780 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
783 static ssize_t ns_exlevel_vinst_store(struct device *dev,
784 struct device_attribute *attr,
785 const char *buf, size_t size)
788 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
789 struct etmv4_config *config = &drvdata->config;
791 if (kstrtoul(buf, 16, &val))
794 spin_lock(&drvdata->spinlock);
795 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
796 config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
797 /* enable instruction tracing for corresponding exception level */
798 val &= drvdata->ns_ex_level;
799 config->vinst_ctrl |= (val << 20);
800 spin_unlock(&drvdata->spinlock);
803 static DEVICE_ATTR_RW(ns_exlevel_vinst);
805 static ssize_t addr_idx_show(struct device *dev,
806 struct device_attribute *attr,
810 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
811 struct etmv4_config *config = &drvdata->config;
813 val = config->addr_idx;
814 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
817 static ssize_t addr_idx_store(struct device *dev,
818 struct device_attribute *attr,
819 const char *buf, size_t size)
822 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
823 struct etmv4_config *config = &drvdata->config;
825 if (kstrtoul(buf, 16, &val))
827 if (val >= drvdata->nr_addr_cmp * 2)
831 * Use spinlock to ensure index doesn't change while it gets
832 * dereferenced multiple times within a spinlock block elsewhere.
834 spin_lock(&drvdata->spinlock);
835 config->addr_idx = val;
836 spin_unlock(&drvdata->spinlock);
839 static DEVICE_ATTR_RW(addr_idx);
841 static ssize_t addr_instdatatype_show(struct device *dev,
842 struct device_attribute *attr,
847 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
848 struct etmv4_config *config = &drvdata->config;
850 spin_lock(&drvdata->spinlock);
851 idx = config->addr_idx;
852 val = BMVAL(config->addr_acc[idx], 0, 1);
853 len = scnprintf(buf, PAGE_SIZE, "%s\n",
854 val == ETM_INSTR_ADDR ? "instr" :
855 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
856 (val == ETM_DATA_STORE_ADDR ? "data_store" :
857 "data_load_store")));
858 spin_unlock(&drvdata->spinlock);
862 static ssize_t addr_instdatatype_store(struct device *dev,
863 struct device_attribute *attr,
864 const char *buf, size_t size)
868 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
869 struct etmv4_config *config = &drvdata->config;
871 if (strlen(buf) >= 20)
873 if (sscanf(buf, "%s", str) != 1)
876 spin_lock(&drvdata->spinlock);
877 idx = config->addr_idx;
878 if (!strcmp(str, "instr"))
879 /* TYPE, bits[1:0] */
880 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
882 spin_unlock(&drvdata->spinlock);
885 static DEVICE_ATTR_RW(addr_instdatatype);
887 static ssize_t addr_single_show(struct device *dev,
888 struct device_attribute *attr,
893 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
894 struct etmv4_config *config = &drvdata->config;
896 idx = config->addr_idx;
897 spin_lock(&drvdata->spinlock);
898 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
899 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
900 spin_unlock(&drvdata->spinlock);
903 val = (unsigned long)config->addr_val[idx];
904 spin_unlock(&drvdata->spinlock);
905 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
908 static ssize_t addr_single_store(struct device *dev,
909 struct device_attribute *attr,
910 const char *buf, size_t size)
914 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
915 struct etmv4_config *config = &drvdata->config;
917 if (kstrtoul(buf, 16, &val))
920 spin_lock(&drvdata->spinlock);
921 idx = config->addr_idx;
922 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
923 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
924 spin_unlock(&drvdata->spinlock);
928 config->addr_val[idx] = (u64)val;
929 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
930 spin_unlock(&drvdata->spinlock);
933 static DEVICE_ATTR_RW(addr_single);
935 static ssize_t addr_range_show(struct device *dev,
936 struct device_attribute *attr,
940 unsigned long val1, val2;
941 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
942 struct etmv4_config *config = &drvdata->config;
944 spin_lock(&drvdata->spinlock);
945 idx = config->addr_idx;
947 spin_unlock(&drvdata->spinlock);
950 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
951 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
952 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
953 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
954 spin_unlock(&drvdata->spinlock);
958 val1 = (unsigned long)config->addr_val[idx];
959 val2 = (unsigned long)config->addr_val[idx + 1];
960 spin_unlock(&drvdata->spinlock);
961 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
964 static ssize_t addr_range_store(struct device *dev,
965 struct device_attribute *attr,
966 const char *buf, size_t size)
969 unsigned long val1, val2;
970 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
971 struct etmv4_config *config = &drvdata->config;
973 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
975 /* lower address comparator cannot have a higher address value */
979 spin_lock(&drvdata->spinlock);
980 idx = config->addr_idx;
982 spin_unlock(&drvdata->spinlock);
986 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
987 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
988 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
989 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
990 spin_unlock(&drvdata->spinlock);
994 config->addr_val[idx] = (u64)val1;
995 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
996 config->addr_val[idx + 1] = (u64)val2;
997 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
999 * Program include or exclude control bits for vinst or vdata
1000 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1002 if (config->mode & ETM_MODE_EXCLUDE)
1003 etm4_set_mode_exclude(drvdata, true);
1005 etm4_set_mode_exclude(drvdata, false);
1007 spin_unlock(&drvdata->spinlock);
1010 static DEVICE_ATTR_RW(addr_range);
1012 static ssize_t addr_start_show(struct device *dev,
1013 struct device_attribute *attr,
1018 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1019 struct etmv4_config *config = &drvdata->config;
1021 spin_lock(&drvdata->spinlock);
1022 idx = config->addr_idx;
1024 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1025 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1026 spin_unlock(&drvdata->spinlock);
1030 val = (unsigned long)config->addr_val[idx];
1031 spin_unlock(&drvdata->spinlock);
1032 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1035 static ssize_t addr_start_store(struct device *dev,
1036 struct device_attribute *attr,
1037 const char *buf, size_t size)
1041 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1042 struct etmv4_config *config = &drvdata->config;
1044 if (kstrtoul(buf, 16, &val))
1047 spin_lock(&drvdata->spinlock);
1048 idx = config->addr_idx;
1049 if (!drvdata->nr_addr_cmp) {
1050 spin_unlock(&drvdata->spinlock);
1053 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1054 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1055 spin_unlock(&drvdata->spinlock);
1059 config->addr_val[idx] = (u64)val;
1060 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1061 config->vissctlr |= BIT(idx);
1062 /* SSSTATUS, bit[9] - turn on start/stop logic */
1063 config->vinst_ctrl |= BIT(9);
1064 spin_unlock(&drvdata->spinlock);
1067 static DEVICE_ATTR_RW(addr_start);
1069 static ssize_t addr_stop_show(struct device *dev,
1070 struct device_attribute *attr,
1075 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1076 struct etmv4_config *config = &drvdata->config;
1078 spin_lock(&drvdata->spinlock);
1079 idx = config->addr_idx;
1081 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1082 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1083 spin_unlock(&drvdata->spinlock);
1087 val = (unsigned long)config->addr_val[idx];
1088 spin_unlock(&drvdata->spinlock);
1089 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1092 static ssize_t addr_stop_store(struct device *dev,
1093 struct device_attribute *attr,
1094 const char *buf, size_t size)
1098 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1099 struct etmv4_config *config = &drvdata->config;
1101 if (kstrtoul(buf, 16, &val))
1104 spin_lock(&drvdata->spinlock);
1105 idx = config->addr_idx;
1106 if (!drvdata->nr_addr_cmp) {
1107 spin_unlock(&drvdata->spinlock);
1110 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1111 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1112 spin_unlock(&drvdata->spinlock);
1116 config->addr_val[idx] = (u64)val;
1117 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1118 config->vissctlr |= BIT(idx + 16);
1119 /* SSSTATUS, bit[9] - turn on start/stop logic */
1120 config->vinst_ctrl |= BIT(9);
1121 spin_unlock(&drvdata->spinlock);
1124 static DEVICE_ATTR_RW(addr_stop);
1126 static ssize_t addr_ctxtype_show(struct device *dev,
1127 struct device_attribute *attr,
1132 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1133 struct etmv4_config *config = &drvdata->config;
1135 spin_lock(&drvdata->spinlock);
1136 idx = config->addr_idx;
1137 /* CONTEXTTYPE, bits[3:2] */
1138 val = BMVAL(config->addr_acc[idx], 2, 3);
1139 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1140 (val == ETM_CTX_CTXID ? "ctxid" :
1141 (val == ETM_CTX_VMID ? "vmid" : "all")));
1142 spin_unlock(&drvdata->spinlock);
1146 static ssize_t addr_ctxtype_store(struct device *dev,
1147 struct device_attribute *attr,
1148 const char *buf, size_t size)
1152 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1153 struct etmv4_config *config = &drvdata->config;
1155 if (strlen(buf) >= 10)
1157 if (sscanf(buf, "%s", str) != 1)
1160 spin_lock(&drvdata->spinlock);
1161 idx = config->addr_idx;
1162 if (!strcmp(str, "none"))
1163 /* start by clearing context type bits */
1164 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1165 else if (!strcmp(str, "ctxid")) {
1166 /* 0b01 The trace unit performs a Context ID */
1167 if (drvdata->numcidc) {
1168 config->addr_acc[idx] |= BIT(2);
1169 config->addr_acc[idx] &= ~BIT(3);
1171 } else if (!strcmp(str, "vmid")) {
1172 /* 0b10 The trace unit performs a VMID */
1173 if (drvdata->numvmidc) {
1174 config->addr_acc[idx] &= ~BIT(2);
1175 config->addr_acc[idx] |= BIT(3);
1177 } else if (!strcmp(str, "all")) {
1179 * 0b11 The trace unit performs a Context ID
1180 * comparison and a VMID
1182 if (drvdata->numcidc)
1183 config->addr_acc[idx] |= BIT(2);
1184 if (drvdata->numvmidc)
1185 config->addr_acc[idx] |= BIT(3);
1187 spin_unlock(&drvdata->spinlock);
1190 static DEVICE_ATTR_RW(addr_ctxtype);
1192 static ssize_t addr_context_show(struct device *dev,
1193 struct device_attribute *attr,
1198 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1199 struct etmv4_config *config = &drvdata->config;
1201 spin_lock(&drvdata->spinlock);
1202 idx = config->addr_idx;
1203 /* context ID comparator bits[6:4] */
1204 val = BMVAL(config->addr_acc[idx], 4, 6);
1205 spin_unlock(&drvdata->spinlock);
1206 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1209 static ssize_t addr_context_store(struct device *dev,
1210 struct device_attribute *attr,
1211 const char *buf, size_t size)
1215 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1216 struct etmv4_config *config = &drvdata->config;
1218 if (kstrtoul(buf, 16, &val))
1220 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1222 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1223 drvdata->numcidc : drvdata->numvmidc))
1226 spin_lock(&drvdata->spinlock);
1227 idx = config->addr_idx;
1228 /* clear context ID comparator bits[6:4] */
1229 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1230 config->addr_acc[idx] |= (val << 4);
1231 spin_unlock(&drvdata->spinlock);
1234 static DEVICE_ATTR_RW(addr_context);
1236 static ssize_t seq_idx_show(struct device *dev,
1237 struct device_attribute *attr,
1241 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1242 struct etmv4_config *config = &drvdata->config;
1244 val = config->seq_idx;
1245 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1248 static ssize_t seq_idx_store(struct device *dev,
1249 struct device_attribute *attr,
1250 const char *buf, size_t size)
1253 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1254 struct etmv4_config *config = &drvdata->config;
1256 if (kstrtoul(buf, 16, &val))
1258 if (val >= drvdata->nrseqstate - 1)
1262 * Use spinlock to ensure index doesn't change while it gets
1263 * dereferenced multiple times within a spinlock block elsewhere.
1265 spin_lock(&drvdata->spinlock);
1266 config->seq_idx = val;
1267 spin_unlock(&drvdata->spinlock);
1270 static DEVICE_ATTR_RW(seq_idx);
/*
 * seq_state_show - report the configured sequencer start state.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1272 static ssize_t seq_state_show(struct device *dev,
1273 struct device_attribute *attr,
1277 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1278 struct etmv4_config *config = &drvdata->config;
1280 val = config->seq_state;
1281 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * seq_state_store - set the sequencer start state.  Hex input; must be
 * below nrseqstate.  No lock is visible here — the write is a single
 * assignment to config->seq_state.
 */
1284 static ssize_t seq_state_store(struct device *dev,
1285 struct device_attribute *attr,
1286 const char *buf, size_t size)
1289 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1290 struct etmv4_config *config = &drvdata->config;
1292 if (kstrtoul(buf, 16, &val))
1294 if (val >= drvdata->nrseqstate)
1297 config->seq_state = val;
1300 static DEVICE_ATTR_RW(seq_state);
/*
 * seq_event_show - report the event control value for the sequencer state
 * currently selected by seq_idx.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1302 static ssize_t seq_event_show(struct device *dev,
1303 struct device_attribute *attr,
1308 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1309 struct etmv4_config *config = &drvdata->config;
/* Lock so seq_idx cannot change between the read of idx and seq_ctrl[idx]. */
1311 spin_lock(&drvdata->spinlock);
1312 idx = config->seq_idx;
1313 val = config->seq_ctrl[idx];
1314 spin_unlock(&drvdata->spinlock);
1315 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * seq_event_store - program the event control for the selected sequencer
 * state.  Only the low byte of the hex input is kept (RST field).
 */
1318 static ssize_t seq_event_store(struct device *dev,
1319 struct device_attribute *attr,
1320 const char *buf, size_t size)
1324 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1325 struct etmv4_config *config = &drvdata->config;
1327 if (kstrtoul(buf, 16, &val))
1330 spin_lock(&drvdata->spinlock);
1331 idx = config->seq_idx;
1332 /* RST, bits[7:0] */
1333 config->seq_ctrl[idx] = val & 0xFF;
1334 spin_unlock(&drvdata->spinlock);
1337 static DEVICE_ATTR_RW(seq_event);
/*
 * seq_reset_event_show - report the sequencer reset event selector.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1339 static ssize_t seq_reset_event_show(struct device *dev,
1340 struct device_attribute *attr,
1344 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1345 struct etmv4_config *config = &drvdata->config;
1347 val = config->seq_rst;
1348 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * seq_reset_event_store - set the event that resets the sequencer.  Hex
 * input masked to ETMv4_EVENT_MASK; rejected if no sequencer states are
 * implemented (nrseqstate == 0).
 */
1351 static ssize_t seq_reset_event_store(struct device *dev,
1352 struct device_attribute *attr,
1353 const char *buf, size_t size)
1356 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1357 struct etmv4_config *config = &drvdata->config;
1359 if (kstrtoul(buf, 16, &val))
1361 if (!(drvdata->nrseqstate))
1364 config->seq_rst = val & ETMv4_EVENT_MASK;
1367 static DEVICE_ATTR_RW(seq_reset_event);
/*
 * cntr_idx_show - report the currently selected counter index.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1369 static ssize_t cntr_idx_show(struct device *dev,
1370 struct device_attribute *attr,
1374 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1375 struct etmv4_config *config = &drvdata->config;
1377 val = config->cntr_idx;
1378 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * cntr_idx_store - select the counter used by cntrldvr/cntr_val/cntr_ctrl.
 * Hex input; must be below nr_cntr.
 */
1381 static ssize_t cntr_idx_store(struct device *dev,
1382 struct device_attribute *attr,
1383 const char *buf, size_t size)
1386 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1387 struct etmv4_config *config = &drvdata->config;
1389 if (kstrtoul(buf, 16, &val))
1391 if (val >= drvdata->nr_cntr)
1395 * Use spinlock to ensure index doesn't change while it gets
1396 * dereferenced multiple times within a spinlock block elsewhere.
1398 spin_lock(&drvdata->spinlock);
1399 config->cntr_idx = val;
1400 spin_unlock(&drvdata->spinlock);
1403 static DEVICE_ATTR_RW(cntr_idx);
/*
 * cntrldvr_show - report the reload value of the counter selected by
 * cntr_idx.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1405 static ssize_t cntrldvr_show(struct device *dev,
1406 struct device_attribute *attr,
1411 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1412 struct etmv4_config *config = &drvdata->config;
/* Lock so cntr_idx cannot change between reading idx and cntrldvr[idx]. */
1414 spin_lock(&drvdata->spinlock);
1415 idx = config->cntr_idx;
1416 val = config->cntrldvr[idx];
1417 spin_unlock(&drvdata->spinlock);
1418 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * cntrldvr_store - set the counter reload value.  Hex input, capped at
 * ETM_CNTR_MAX_VAL.
 */
1421 static ssize_t cntrldvr_store(struct device *dev,
1422 struct device_attribute *attr,
1423 const char *buf, size_t size)
1427 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1428 struct etmv4_config *config = &drvdata->config;
1430 if (kstrtoul(buf, 16, &val))
1432 if (val > ETM_CNTR_MAX_VAL)
1435 spin_lock(&drvdata->spinlock);
1436 idx = config->cntr_idx;
1437 config->cntrldvr[idx] = val;
1438 spin_unlock(&drvdata->spinlock);
1441 static DEVICE_ATTR_RW(cntrldvr);
/*
 * cntr_val_show - report the current value of the counter selected by
 * cntr_idx.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1443 static ssize_t cntr_val_show(struct device *dev,
1444 struct device_attribute *attr,
1449 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1450 struct etmv4_config *config = &drvdata->config;
1452 spin_lock(&drvdata->spinlock);
1453 idx = config->cntr_idx;
1454 val = config->cntr_val[idx];
1455 spin_unlock(&drvdata->spinlock);
1456 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * cntr_val_store - set the counter value.  Hex input, capped at
 * ETM_CNTR_MAX_VAL.
 */
1459 static ssize_t cntr_val_store(struct device *dev,
1460 struct device_attribute *attr,
1461 const char *buf, size_t size)
1465 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1466 struct etmv4_config *config = &drvdata->config;
1468 if (kstrtoul(buf, 16, &val))
1470 if (val > ETM_CNTR_MAX_VAL)
1473 spin_lock(&drvdata->spinlock);
1474 idx = config->cntr_idx;
1475 config->cntr_val[idx] = val;
1476 spin_unlock(&drvdata->spinlock);
1479 static DEVICE_ATTR_RW(cntr_val);
/*
 * cntr_ctrl_show - report the control value of the counter selected by
 * cntr_idx.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1481 static ssize_t cntr_ctrl_show(struct device *dev,
1482 struct device_attribute *attr,
1487 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1488 struct etmv4_config *config = &drvdata->config;
1490 spin_lock(&drvdata->spinlock);
1491 idx = config->cntr_idx;
1492 val = config->cntr_ctrl[idx];
1493 spin_unlock(&drvdata->spinlock);
1494 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * cntr_ctrl_store - set the counter control value.  Note: unlike
 * cntrldvr/cntr_val, no range check is visible here — the raw hex value is
 * stored as-is.
 */
1497 static ssize_t cntr_ctrl_store(struct device *dev,
1498 struct device_attribute *attr,
1499 const char *buf, size_t size)
1503 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1504 struct etmv4_config *config = &drvdata->config;
1506 if (kstrtoul(buf, 16, &val))
1509 spin_lock(&drvdata->spinlock);
1510 idx = config->cntr_idx;
1511 config->cntr_ctrl[idx] = val;
1512 spin_unlock(&drvdata->spinlock);
1515 static DEVICE_ATTR_RW(cntr_ctrl);
/*
 * res_idx_show - report the currently selected resource selector index.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1517 static ssize_t res_idx_show(struct device *dev,
1518 struct device_attribute *attr,
1522 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1523 struct etmv4_config *config = &drvdata->config;
1525 val = config->res_idx;
1526 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * res_idx_store - select the resource selector used by res_ctrl.  Index 0
 * is explicitly rejected (reserved pair), as is anything >= nr_resource.
 */
1529 static ssize_t res_idx_store(struct device *dev,
1530 struct device_attribute *attr,
1531 const char *buf, size_t size)
1534 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1535 struct etmv4_config *config = &drvdata->config;
1537 if (kstrtoul(buf, 16, &val))
1539 /* Resource selector pair 0 is always implemented and reserved */
1540 if ((val == 0) || (val >= drvdata->nr_resource))
1544 * Use spinlock to ensure index doesn't change while it gets
1545 * dereferenced multiple times within a spinlock block elsewhere.
1547 spin_lock(&drvdata->spinlock);
1548 config->res_idx = val;
1549 spin_unlock(&drvdata->spinlock);
1552 static DEVICE_ATTR_RW(res_idx);
/*
 * res_ctrl_show - report the control value of the resource selector chosen
 * by res_idx.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1554 static ssize_t res_ctrl_show(struct device *dev,
1555 struct device_attribute *attr,
1560 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1561 struct etmv4_config *config = &drvdata->config;
1563 spin_lock(&drvdata->spinlock);
1564 idx = config->res_idx;
1565 val = config->res_ctrl[idx];
1566 spin_unlock(&drvdata->spinlock);
1567 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * res_ctrl_store - program the resource selector control register for the
 * selected index.  The comments below indicate the PAIRINV bit[21] is
 * cleared for odd indexes (RES0 there), but the masking statement itself
 * was lost in extraction — verify against upstream before relying on it.
 */
1570 static ssize_t res_ctrl_store(struct device *dev,
1571 struct device_attribute *attr,
1572 const char *buf, size_t size)
1576 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1577 struct etmv4_config *config = &drvdata->config;
1579 if (kstrtoul(buf, 16, &val))
1582 spin_lock(&drvdata->spinlock);
1583 idx = config->res_idx;
1584 /* For odd idx pair inversal bit is RES0 */
1586 /* PAIRINV, bit[21] */
1588 config->res_ctrl[idx] = val;
1589 spin_unlock(&drvdata->spinlock);
1592 static DEVICE_ATTR_RW(res_ctrl);
/*
 * ctxid_idx_show - report the currently selected context ID comparator index.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1594 static ssize_t ctxid_idx_show(struct device *dev,
1595 struct device_attribute *attr,
1599 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1600 struct etmv4_config *config = &drvdata->config;
1602 val = config->ctxid_idx;
1603 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * ctxid_idx_store - select the context ID comparator used by ctxid_pid.
 * Hex input; must be below numcidc.
 */
1606 static ssize_t ctxid_idx_store(struct device *dev,
1607 struct device_attribute *attr,
1608 const char *buf, size_t size)
1611 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1612 struct etmv4_config *config = &drvdata->config;
1614 if (kstrtoul(buf, 16, &val))
1616 if (val >= drvdata->numcidc)
1620 * Use spinlock to ensure index doesn't change while it gets
1621 * dereferenced multiple times within a spinlock block elsewhere.
1623 spin_lock(&drvdata->spinlock);
1624 config->ctxid_idx = val;
1625 spin_unlock(&drvdata->spinlock);
1628 static DEVICE_ATTR_RW(ctxid_idx);
/*
 * ctxid_pid_show - report the PID programmed into the context ID comparator
 * selected by ctxid_idx.  Refused when the caller is inside a non-initial
 * PID namespace (see the rationale in ctxid_pid_store()).
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1630 static ssize_t ctxid_pid_show(struct device *dev,
1631 struct device_attribute *attr,
1636 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1637 struct etmv4_config *config = &drvdata->config;
1640 * Don't use contextID tracing if coming from a PID namespace. See
1641 * comment in ctxid_pid_store().
1643 if (task_active_pid_ns(current) != &init_pid_ns)
1646 spin_lock(&drvdata->spinlock);
1647 idx = config->ctxid_idx;
1648 val = (unsigned long)config->ctxid_pid[idx];
1649 spin_unlock(&drvdata->spinlock);
1650 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * ctxid_pid_store - program a PID into the selected context ID comparator.
 * Rejected when called from a non-initial PID namespace (namespaced PIDs
 * would not match what the kernel traces and could leak information), or
 * when the hardware implements no ctxid comparators / zero-width ctxid.
 */
1653 static ssize_t ctxid_pid_store(struct device *dev,
1654 struct device_attribute *attr,
1655 const char *buf, size_t size)
1659 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1660 struct etmv4_config *config = &drvdata->config;
1663 * When contextID tracing is enabled the tracers will insert the
1664 * value found in the contextID register in the trace stream. But if
1665 * a process is in a namespace the PID of that process as seen from the
1666 * namespace won't be what the kernel sees, something that makes the
1667 * feature confusing and can potentially leak kernel only information.
1668 * As such refuse to use the feature if @current is not in the initial
1671 if (task_active_pid_ns(current) != &init_pid_ns)
1675 * only implemented when ctxid tracing is enabled, i.e. at least one
1676 * ctxid comparator is implemented and ctxid is greater than 0 bits
1679 if (!drvdata->ctxid_size || !drvdata->numcidc)
1681 if (kstrtoul(buf, 16, &pid))
1684 spin_lock(&drvdata->spinlock);
1685 idx = config->ctxid_idx;
1686 config->ctxid_pid[idx] = (u64)pid;
1687 spin_unlock(&drvdata->spinlock);
1690 static DEVICE_ATTR_RW(ctxid_pid);
/*
 * ctxid_masks_show - report both context ID comparator mask registers as two
 * hex words ("mask0 mask1").  Refused from a non-initial PID namespace.
 * NOTE(review): decls/braces/returns, switch case labels and loop structure
 * were lost in extraction — verify against upstream coresight-etm4x-sysfs.c.
 */
1692 static ssize_t ctxid_masks_show(struct device *dev,
1693 struct device_attribute *attr,
1696 unsigned long val1, val2;
1697 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1698 struct etmv4_config *config = &drvdata->config;
1701 * Don't use contextID tracing if coming from a PID namespace. See
1702 * comment in ctxid_pid_store().
1704 if (task_active_pid_ns(current) != &init_pid_ns)
1707 spin_lock(&drvdata->spinlock);
1708 val1 = config->ctxid_mask0;
1709 val2 = config->ctxid_mask1;
1710 spin_unlock(&drvdata->spinlock);
1711 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
/*
 * ctxid_masks_store - program the two context ID comparator mask registers.
 * Input is two hex words.  The switch truncates the masks to the number of
 * implemented comparators (numcidc), one byte of mask per comparator:
 * comparators 0-3 live in mask0, 4-7 in mask1.  Afterwards, for every mask
 * bit that is set, the corresponding byte of the comparator value is cleared,
 * since the architecture leaves behavior unpredictable otherwise.
 */
1714 static ssize_t ctxid_masks_store(struct device *dev,
1715 struct device_attribute *attr,
1716 const char *buf, size_t size)
1719 unsigned long val1, val2, mask;
1720 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1721 struct etmv4_config *config = &drvdata->config;
1724 * Don't use contextID tracing if coming from a PID namespace. See
1725 * comment in ctxid_pid_store().
1727 if (task_active_pid_ns(current) != &init_pid_ns)
1731 * only implemented when ctxid tracing is enabled, i.e. at least one
1732 * ctxid comparator is implemented and ctxid is greater than 0 bits
1735 if (!drvdata->ctxid_size || !drvdata->numcidc)
1737 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1740 spin_lock(&drvdata->spinlock);
1742 * each byte[0..3] controls mask value applied to ctxid
1745 switch (drvdata->numcidc) {
1747 /* COMP0, bits[7:0] */
1748 config->ctxid_mask0 = val1 & 0xFF;
1751 /* COMP1, bits[15:8] */
1752 config->ctxid_mask0 = val1 & 0xFFFF;
1755 /* COMP2, bits[23:16] */
1756 config->ctxid_mask0 = val1 & 0xFFFFFF;
1759 /* COMP3, bits[31:24] */
1760 config->ctxid_mask0 = val1;
1763 /* COMP4, bits[7:0] */
1764 config->ctxid_mask0 = val1;
1765 config->ctxid_mask1 = val2 & 0xFF;
1768 /* COMP5, bits[15:8] */
1769 config->ctxid_mask0 = val1;
1770 config->ctxid_mask1 = val2 & 0xFFFF;
1773 /* COMP6, bits[23:16] */
1774 config->ctxid_mask0 = val1;
1775 config->ctxid_mask1 = val2 & 0xFFFFFF;
1778 /* COMP7, bits[31:24] */
1779 config->ctxid_mask0 = val1;
1780 config->ctxid_mask1 = val2;
1786 * If software sets a mask bit to 1, it must program relevant byte
1787 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
1788 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1789 * of ctxid comparator0 value (corresponding to byte 0) register.
1791 mask = config->ctxid_mask0;
1792 for (i = 0; i < drvdata->numcidc; i++) {
1793 /* mask value of corresponding ctxid comparator */
1794 maskbyte = mask & ETMv4_EVENT_MASK;
1796 * each bit corresponds to a byte of respective ctxid comparator
1799 for (j = 0; j < 8; j++) {
1801 config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
1804 /* Select the next ctxid comparator mask value */
1806 /* ctxid comparators[4-7] */
1807 mask = config->ctxid_mask1;
1812 spin_unlock(&drvdata->spinlock);
1815 static DEVICE_ATTR_RW(ctxid_masks);
/*
 * vmid_idx_show - report the currently selected VMID comparator index.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1817 static ssize_t vmid_idx_show(struct device *dev,
1818 struct device_attribute *attr,
1822 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1823 struct etmv4_config *config = &drvdata->config;
1825 val = config->vmid_idx;
1826 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * vmid_idx_store - select the VMID comparator used by vmid_val.  Hex input;
 * must be below numvmidc.
 */
1829 static ssize_t vmid_idx_store(struct device *dev,
1830 struct device_attribute *attr,
1831 const char *buf, size_t size)
1834 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1835 struct etmv4_config *config = &drvdata->config;
1837 if (kstrtoul(buf, 16, &val))
1839 if (val >= drvdata->numvmidc)
1843 * Use spinlock to ensure index doesn't change while it gets
1844 * dereferenced multiple times within a spinlock block elsewhere.
1846 spin_lock(&drvdata->spinlock);
1847 config->vmid_idx = val;
1848 spin_unlock(&drvdata->spinlock);
1851 static DEVICE_ATTR_RW(vmid_idx);
/*
 * vmid_val_show - report the value of the VMID comparator selected by
 * vmid_idx.  Note: unlike ctxid_pid_show, the read of vmid_idx and
 * vmid_val[] here is not visibly under the spinlock.
 * NOTE(review): decls/braces/returns lost in extraction; verify upstream.
 */
1853 static ssize_t vmid_val_show(struct device *dev,
1854 struct device_attribute *attr,
1858 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1859 struct etmv4_config *config = &drvdata->config;
1861 val = (unsigned long)config->vmid_val[config->vmid_idx];
1862 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * vmid_val_store - program the selected VMID comparator.  Rejected when no
 * VMID comparators are implemented or the VMID size is zero.
 */
1865 static ssize_t vmid_val_store(struct device *dev,
1866 struct device_attribute *attr,
1867 const char *buf, size_t size)
1870 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1871 struct etmv4_config *config = &drvdata->config;
1874 * only implemented when vmid tracing is enabled, i.e. at least one
1875 * vmid comparator is implemented and at least 8 bit vmid size
1877 if (!drvdata->vmid_size || !drvdata->numvmidc)
1879 if (kstrtoul(buf, 16, &val))
1882 spin_lock(&drvdata->spinlock);
1883 config->vmid_val[config->vmid_idx] = (u64)val;
1884 spin_unlock(&drvdata->spinlock);
1887 static DEVICE_ATTR_RW(vmid_val);
/*
 * vmid_masks_show - report both VMID comparator mask registers as two hex
 * words ("mask0 mask1").
 * NOTE(review): decls/braces/returns, switch case labels and loop structure
 * were lost in extraction — verify against upstream coresight-etm4x-sysfs.c.
 */
1889 static ssize_t vmid_masks_show(struct device *dev,
1890 struct device_attribute *attr, char *buf)
1892 unsigned long val1, val2;
1893 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1894 struct etmv4_config *config = &drvdata->config;
1896 spin_lock(&drvdata->spinlock);
1897 val1 = config->vmid_mask0;
1898 val2 = config->vmid_mask1;
1899 spin_unlock(&drvdata->spinlock);
1900 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
/*
 * vmid_masks_store - program the two VMID comparator mask registers.
 * Mirrors ctxid_masks_store(): two hex words in, masks truncated to the
 * number of implemented comparators (numvmidc, one mask byte each,
 * comparators 0-3 in mask0 and 4-7 in mask1), then every comparator-value
 * byte whose mask bit is set is cleared to avoid architecturally
 * unpredictable behavior.
 */
1903 static ssize_t vmid_masks_store(struct device *dev,
1904 struct device_attribute *attr,
1905 const char *buf, size_t size)
1908 unsigned long val1, val2, mask;
1909 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1910 struct etmv4_config *config = &drvdata->config;
1913 * only implemented when vmid tracing is enabled, i.e. at least one
1914 * vmid comparator is implemented and at least 8 bit vmid size
1916 if (!drvdata->vmid_size || !drvdata->numvmidc)
1918 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1921 spin_lock(&drvdata->spinlock);
1924 * each byte[0..3] controls mask value applied to vmid
1927 switch (drvdata->numvmidc) {
1929 /* COMP0, bits[7:0] */
1930 config->vmid_mask0 = val1 & 0xFF;
1933 /* COMP1, bits[15:8] */
1934 config->vmid_mask0 = val1 & 0xFFFF;
1937 /* COMP2, bits[23:16] */
1938 config->vmid_mask0 = val1 & 0xFFFFFF;
1941 /* COMP3, bits[31:24] */
1942 config->vmid_mask0 = val1;
1945 /* COMP4, bits[7:0] */
1946 config->vmid_mask0 = val1;
1947 config->vmid_mask1 = val2 & 0xFF;
1950 /* COMP5, bits[15:8] */
1951 config->vmid_mask0 = val1;
1952 config->vmid_mask1 = val2 & 0xFFFF;
1955 /* COMP6, bits[23:16] */
1956 config->vmid_mask0 = val1;
1957 config->vmid_mask1 = val2 & 0xFFFFFF;
1960 /* COMP7, bits[31:24] */
1961 config->vmid_mask0 = val1;
1962 config->vmid_mask1 = val2;
1969 * If software sets a mask bit to 1, it must program relevant byte
1970 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
1971 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
1972 * of vmid comparator0 value (corresponding to byte 0) register.
1974 mask = config->vmid_mask0;
1975 for (i = 0; i < drvdata->numvmidc; i++) {
1976 /* mask value of corresponding vmid comparator */
1977 maskbyte = mask & ETMv4_EVENT_MASK;
1979 * each bit corresponds to a byte of respective vmid comparator
1982 for (j = 0; j < 8; j++) {
1984 config->vmid_val[i] &= ~(0xFFUL << (j * 8));
1987 /* Select the next vmid comparator mask value */
1989 /* vmid comparators[4-7] */
1990 mask = config->vmid_mask1;
1994 spin_unlock(&drvdata->spinlock);
1997 static DEVICE_ATTR_RW(vmid_masks);
/*
 * cpu_show - report the CPU this ETM instance is bound to, as a signed
 * decimal.  The assignment of 'val' (presumably from drvdata->cpu) was lost
 * in extraction — verify against upstream.
 */
1999 static ssize_t cpu_show(struct device *dev,
2000 struct device_attribute *attr, char *buf)
2003 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2006 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2009 static DEVICE_ATTR_RO(cpu);
/*
 * Main sysfs attribute group for the ETMv4: configuration resources
 * (comparators, counters, sequencer, resource selectors, ctxid/vmid
 * matching) exposed under the coresight device directory.
 * NOTE(review): the array's NULL terminator and closing brace were lost in
 * extraction — verify against upstream.
 */
2011 static struct attribute *coresight_etmv4_attrs[] = {
2012 &dev_attr_nr_pe_cmp.attr,
2013 &dev_attr_nr_addr_cmp.attr,
2014 &dev_attr_nr_cntr.attr,
2015 &dev_attr_nr_ext_inp.attr,
2016 &dev_attr_numcidc.attr,
2017 &dev_attr_numvmidc.attr,
2018 &dev_attr_nrseqstate.attr,
2019 &dev_attr_nr_resource.attr,
2020 &dev_attr_nr_ss_cmp.attr,
2021 &dev_attr_reset.attr,
2022 &dev_attr_mode.attr,
2024 &dev_attr_event.attr,
2025 &dev_attr_event_instren.attr,
2026 &dev_attr_event_ts.attr,
2027 &dev_attr_syncfreq.attr,
2028 &dev_attr_cyc_threshold.attr,
2029 &dev_attr_bb_ctrl.attr,
2030 &dev_attr_event_vinst.attr,
2031 &dev_attr_s_exlevel_vinst.attr,
2032 &dev_attr_ns_exlevel_vinst.attr,
2033 &dev_attr_addr_idx.attr,
2034 &dev_attr_addr_instdatatype.attr,
2035 &dev_attr_addr_single.attr,
2036 &dev_attr_addr_range.attr,
2037 &dev_attr_addr_start.attr,
2038 &dev_attr_addr_stop.attr,
2039 &dev_attr_addr_ctxtype.attr,
2040 &dev_attr_addr_context.attr,
2041 &dev_attr_seq_idx.attr,
2042 &dev_attr_seq_state.attr,
2043 &dev_attr_seq_event.attr,
2044 &dev_attr_seq_reset_event.attr,
2045 &dev_attr_cntr_idx.attr,
2046 &dev_attr_cntrldvr.attr,
2047 &dev_attr_cntr_val.attr,
2048 &dev_attr_cntr_ctrl.attr,
2049 &dev_attr_res_idx.attr,
2050 &dev_attr_res_ctrl.attr,
2051 &dev_attr_ctxid_idx.attr,
2052 &dev_attr_ctxid_pid.attr,
2053 &dev_attr_ctxid_masks.attr,
2054 &dev_attr_vmid_idx.attr,
2055 &dev_attr_vmid_val.attr,
2056 &dev_attr_vmid_masks.attr,
/*
 * do_smp_cross_read - smp_call_function_single() callback: read the MMIO
 * register described by the etmv4_reg cookie on the target CPU and stash
 * the result back into it.  Braces were lost in extraction.
 */
2066 static void do_smp_cross_read(void *data)
2068 struct etmv4_reg *reg = data;
2070 reg->data = readl_relaxed(reg->addr);
/*
 * etmv4_cross_read - read an ETMv4 register at @offset on the CPU that owns
 * the tracer, via a synchronous cross-call (the target CPU must be powered
 * up to access the trace registers).
 * NOTE(review): the 'return reg.data;' line and braces were lost in
 * extraction — verify against upstream.
 */
2073 static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2075 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2076 struct etmv4_reg reg;
2078 reg.addr = drvdata->base + offset;
2080 * smp cross call ensures the CPU will be powered up before
2081 * accessing the ETMv4 trace core registers
2083 smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
/*
 * Helper macros wrapping the generic coresight "simple register" attribute
 * generators: coresight_etm4x_reg() creates a direct 32-bit MMIO read
 * attribute, coresight_etm4x_cross_read() routes the read through
 * etmv4_cross_read() so it happens on the owning CPU.
 */
2087 #define coresight_etm4x_reg(name, offset) \
2088 coresight_simple_reg32(struct etmv4_drvdata, name, offset)
2090 #define coresight_etm4x_cross_read(name, offset) \
2091 coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
/* Management registers readable without a cross-call. */
2094 coresight_etm4x_reg(trcpdcr, TRCPDCR);
2095 coresight_etm4x_reg(trcpdsr, TRCPDSR);
2096 coresight_etm4x_reg(trclsr, TRCLSR);
2097 coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
2098 coresight_etm4x_reg(trcdevid, TRCDEVID);
2099 coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
2100 coresight_etm4x_reg(trcpidr0, TRCPIDR0);
2101 coresight_etm4x_reg(trcpidr1, TRCPIDR1);
2102 coresight_etm4x_reg(trcpidr2, TRCPIDR2);
2103 coresight_etm4x_reg(trcpidr3, TRCPIDR3);
/* Trace-core registers that must be read on the owning CPU. */
2104 coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
2105 coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
2106 coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
/*
 * "mgmt" sysfs group: raw management/ID register dumps.
 * NOTE(review): NULL terminator and closing brace lost in extraction.
 */
2108 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2109 &dev_attr_trcoslsr.attr,
2110 &dev_attr_trcpdcr.attr,
2111 &dev_attr_trcpdsr.attr,
2112 &dev_attr_trclsr.attr,
2113 &dev_attr_trcconfig.attr,
2114 &dev_attr_trctraceid.attr,
2115 &dev_attr_trcauthstatus.attr,
2116 &dev_attr_trcdevid.attr,
2117 &dev_attr_trcdevtype.attr,
2118 &dev_attr_trcpidr0.attr,
2119 &dev_attr_trcpidr1.attr,
2120 &dev_attr_trcpidr2.attr,
2121 &dev_attr_trcpidr3.attr,
/* TRCIDRn implementation-ID registers, read on the owning CPU. */
2125 coresight_etm4x_cross_read(trcidr0, TRCIDR0);
2126 coresight_etm4x_cross_read(trcidr1, TRCIDR1);
2127 coresight_etm4x_cross_read(trcidr2, TRCIDR2);
2128 coresight_etm4x_cross_read(trcidr3, TRCIDR3);
2129 coresight_etm4x_cross_read(trcidr4, TRCIDR4);
2130 coresight_etm4x_cross_read(trcidr5, TRCIDR5);
2131 /* trcidr[6,7] are reserved */
2132 coresight_etm4x_cross_read(trcidr8, TRCIDR8);
2133 coresight_etm4x_cross_read(trcidr9, TRCIDR9);
2134 coresight_etm4x_cross_read(trcidr10, TRCIDR10);
2135 coresight_etm4x_cross_read(trcidr11, TRCIDR11);
2136 coresight_etm4x_cross_read(trcidr12, TRCIDR12);
2137 coresight_etm4x_cross_read(trcidr13, TRCIDR13);
/*
 * "trcidr" sysfs group: the TRCIDRn ID register dumps declared above.
 * NOTE(review): NULL terminator and closing brace lost in extraction.
 */
2139 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2140 &dev_attr_trcidr0.attr,
2141 &dev_attr_trcidr1.attr,
2142 &dev_attr_trcidr2.attr,
2143 &dev_attr_trcidr3.attr,
2144 &dev_attr_trcidr4.attr,
2145 &dev_attr_trcidr5.attr,
2146 /* trcidr[6,7] are reserved */
2147 &dev_attr_trcidr8.attr,
2148 &dev_attr_trcidr9.attr,
2149 &dev_attr_trcidr10.attr,
2150 &dev_attr_trcidr11.attr,
2151 &dev_attr_trcidr12.attr,
2152 &dev_attr_trcidr13.attr,
/*
 * Attribute-group wrappers for the three arrays above.  The default group
 * (no .name) lands directly in the device directory; the named ones are
 * presumably "mgmt" and "trcidr" subdirectories — the .name initializers
 * and closing braces were lost in extraction, verify upstream.
 */
2156 static const struct attribute_group coresight_etmv4_group = {
2157 .attrs = coresight_etmv4_attrs,
2160 static const struct attribute_group coresight_etmv4_mgmt_group = {
2161 .attrs = coresight_etmv4_mgmt_attrs,
2165 static const struct attribute_group coresight_etmv4_trcidr_group = {
2166 .attrs = coresight_etmv4_trcidr_attrs,
2170 const struct attribute_group *coresight_etmv4_groups[] = {
2171 &coresight_etmv4_group,
2172 &coresight_etmv4_mgmt_group,
2173 &coresight_etmv4_trcidr_group,