1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
6 #include <linux/kernel.h>
7 #include <linux/moduleparam.h>
8 #include <linux/init.h>
9 #include <linux/types.h>
10 #include <linux/device.h>
12 #include <linux/err.h>
14 #include <linux/slab.h>
15 #include <linux/delay.h>
16 #include <linux/smp.h>
17 #include <linux/sysfs.h>
18 #include <linux/stat.h>
19 #include <linux/clk.h>
20 #include <linux/cpu.h>
21 #include <linux/coresight.h>
22 #include <linux/coresight-pmu.h>
23 #include <linux/pm_wakeup.h>
24 #include <linux/amba/bus.h>
25 #include <linux/seq_file.h>
26 #include <linux/uaccess.h>
27 #include <linux/perf_event.h>
28 #include <linux/pm_runtime.h>
29 #include <asm/sections.h>
30 #include <asm/local.h>
33 #include "coresight-etm4x.h"
34 #include "coresight-etm-perf.h"
/* Non-zero to start tracing at boot; exposed read-only through sysfs. */
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);

/* The number of ETMv4 currently registered */
static int etm4_count;
/* Per-CPU lookup table, indexed by the CPU each tracer is affine to. */
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
static void etm4_set_default_config(struct etmv4_config *config);
static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
				  struct perf_event *event);

/* Dynamic hotplug state returned by cpuhp_setup_state_nocalls_cpuslocked() */
static enum cpuhp_state hp_online;
/*
 * etm4_os_unlock - clear the OS Lock so the trace unit registers become
 * accessible to software.  Must run on the CPU this tracer is affine to.
 */
static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
	/* Writing any value to ETMOSLAR unlocks the trace registers */
	writel_relaxed(0x0, drvdata->base + TRCOSLAR);
	drvdata->os_unlock = true;
/*
 * etm4_arch_supported - check the TRCIDR1 architecture field (major
 * version only) against the versions this driver knows how to handle.
 */
static bool etm4_arch_supported(u8 arch)
	/* Mask out the minor version number */
	switch (arch & 0xf0) {
68 static int etm4_cpu_id(struct coresight_device *csdev)
70 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
75 static int etm4_trace_id(struct coresight_device *csdev)
77 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
79 return drvdata->trcid;
/*
 * Bundle passed to etm4_enable_hw_smp_call() so the return code of
 * etm4_enable_hw() can travel back from the cross-CPU call.
 */
struct etm4_enable_arg {
	struct etmv4_drvdata *drvdata;
/*
 * etm4_enable_hw - program and turn on the trace unit.
 *
 * Mirrors drvdata->config into the hardware configuration registers with
 * the trace unit disabled, then re-enables it via TRCPRGCTLR.  Must run
 * on the CPU this tracer is affine to, with the coresight lock and the
 * OS Lock released for the duration of the programming.
 */
static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
	struct etmv4_config *config = &drvdata->config;

	CS_UNLOCK(drvdata->base);

	etm4_os_unlock(drvdata);

	/* Take ownership of the device before touching it. */
	rc = coresight_claim_device_unlocked(drvdata->base);

	/* Disable the trace unit before programming trace registers */
	writel_relaxed(0, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go up */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
		dev_err(drvdata->dev,
			"timeout while waiting for Idle Trace Status\n");

	writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
	writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
	/* nothing specific implemented */
	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
	writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
	writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
	writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
	writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
	writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
	writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
	writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR);
	writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
	writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR);
	writel_relaxed(config->viiectlr,	drvdata->base + TRCVIIECTLR);
	writel_relaxed(config->vissctlr,
		       drvdata->base + TRCVISSCTLR);
	writel_relaxed(config->vipcssctlr,
		       drvdata->base + TRCVIPCSSCTLR);
	/* Sequencer: one event register fewer than there are states. */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		writel_relaxed(config->seq_ctrl[i],
			       drvdata->base + TRCSEQEVRn(i));
	writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR);
	writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR);
	writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR);
	for (i = 0; i < drvdata->nr_cntr; i++) {
		writel_relaxed(config->cntrldvr[i],
			       drvdata->base + TRCCNTRLDVRn(i));
		writel_relaxed(config->cntr_ctrl[i],
			       drvdata->base + TRCCNTCTLRn(i));
		writel_relaxed(config->cntr_val[i],
			       drvdata->base + TRCCNTVRn(i));

	/* Resource selector pair 0 is always implemented and reserved */
	for (i = 0; i < drvdata->nr_resource * 2; i++)
		writel_relaxed(config->res_ctrl[i],
			       drvdata->base + TRCRSCTLRn(i));

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		writel_relaxed(config->ss_ctrl[i],
			       drvdata->base + TRCSSCCRn(i));
		writel_relaxed(config->ss_status[i],
			       drvdata->base + TRCSSCSRn(i));
		writel_relaxed(config->ss_pe_cmp[i],
			       drvdata->base + TRCSSPCICRn(i));
	/* Address comparators are 64-bit registers, hence writeq. */
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		writeq_relaxed(config->addr_val[i],
			       drvdata->base + TRCACVRn(i));
		writeq_relaxed(config->addr_acc[i],
			       drvdata->base + TRCACATRn(i));
	for (i = 0; i < drvdata->numcidc; i++)
		writeq_relaxed(config->ctxid_pid[i],
			       drvdata->base + TRCCIDCVRn(i));
	writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
	writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);

	for (i = 0; i < drvdata->numvmidc; i++)
		writeq_relaxed(config->vmid_val[i],
			       drvdata->base + TRCVMIDCVRn(i));
	writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
	writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);

	/*
	 * Request to keep the trace unit powered and also
	 * emulation of powerdown
	 */
	writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) | TRCPDCR_PU,
		       drvdata->base + TRCPDCR);

	/* Enable the trace unit */
	writel_relaxed(1, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go back down to '0' */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
		dev_err(drvdata->dev,
			"timeout while waiting for Idle Trace Status\n");

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d enable smp call done: %d\n",
/*
 * Cross-call shim: run etm4_enable_hw() on the target CPU and convey its
 * return code back through the etm4_enable_arg bundle.
 */
static void etm4_enable_hw_smp_call(void *info)
	struct etm4_enable_arg *arg = info;

	arg->rc = etm4_enable_hw(arg->drvdata);
/*
 * etm4_parse_event_config - translate a perf event's attributes into an
 * ETMv4 configuration.
 *
 * Starts from a cleared default configuration, applies any address
 * filters given on the perf command line, then maps the generic
 * ETM_OPT_* bits onto ETMv4 TRCCONFIGR fields.
 */
static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
				   struct perf_event *event)
	struct etmv4_config *config = &drvdata->config;
	struct perf_event_attr *attr = &event->attr;

	/* Clear configuration from previous run */
	memset(config, 0, sizeof(struct etmv4_config));

	if (attr->exclude_kernel)
		config->mode = ETM_MODE_EXCL_KERN;

	/* NOTE(review): plain '=' — exclude_user takes precedence if both set */
	if (attr->exclude_user)
		config->mode = ETM_MODE_EXCL_USER;

	/* Always start from the default config */
	etm4_set_default_config(config);

	/* Configure filters specified on the perf cmd line, if any. */
	ret = etm4_set_event_filters(drvdata, event);

	/* Go from generic option to ETMv4 specifics */
	if (attr->config & BIT(ETM_OPT_CYCACC)) {
		/* bit[4], Cycle counting instruction trace bit */
		config->cfg |= BIT(4);
		/* TRM: Must program this for cycacc to work */
		config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
	if (attr->config & BIT(ETM_OPT_TS))
		/* bit[11], Global timestamp tracing bit */
		config->cfg |= BIT(11);
	/* return stack - enable if selected and supported */
	if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
		/* bit[12], Return stack enable bit */
		config->cfg |= BIT(12);
/*
 * etm4_enable_perf - enable the tracer for a perf session.  Perf calls
 * this on the CPU being traced, which must be the affine CPU (enforced
 * by the WARN below) so the register writes land on powered hardware.
 */
static int etm4_enable_perf(struct coresight_device *csdev,
			    struct perf_event *event)
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {

	/* Configure the tracer based on the session's specifics */
	ret = etm4_parse_event_config(drvdata, event);

	/* And program the hardware */
	ret = etm4_enable_hw(drvdata);
/*
 * etm4_enable_sysfs - enable the tracer on behalf of the sysfs interface.
 * Unlike the perf path, this may be called from any CPU, so the hardware
 * programming is cross-called to the affine CPU.
 */
static int etm4_enable_sysfs(struct coresight_device *csdev)
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etm4_enable_arg arg = { 0 };

	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
	 * ensures that register writes occur when cpu is powered.
	 */
	arg.drvdata = drvdata;
	ret = smp_call_function_single(drvdata->cpu,
				       etm4_enable_hw_smp_call, &arg, 1);

	/* Once set, survives CPU hotplug until explicitly disabled. */
	drvdata->sticky_enable = true;
	spin_unlock(&drvdata->spinlock);

	dev_dbg(drvdata->dev, "ETM tracing enabled\n");
/*
 * etm4_enable - entry point from the coresight core.  An atomic cmpxchg
 * on drvdata->mode guarantees only one of the sysfs/perf interfaces owns
 * the tracer at a time; the winner then dispatches on @mode.
 */
static int etm4_enable(struct coresight_device *csdev,
		       struct perf_event *event, u32 mode)
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);

	/* Someone is already using the tracer */

		ret = etm4_enable_sysfs(csdev);
		ret = etm4_enable_perf(csdev, event);

	/* The tracer didn't start */
		local_set(&drvdata->mode, CS_MODE_DISABLED);
/*
 * etm4_disable_hw - turn the trace unit off.  Drops the power-up request,
 * clears TRCPRGCTLR.EN and releases the claim tag.  Runs on the affine
 * CPU (directly or via smp_call_function_single()).
 */
static void etm4_disable_hw(void *info)
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* power can be removed from the trace unit now */
	control = readl_relaxed(drvdata->base + TRCPDCR);
	control &= ~TRCPDCR_PU;
	writel_relaxed(control, drvdata->base + TRCPDCR);

	control = readl_relaxed(drvdata->base + TRCPRGCTLR);

	/* EN, bit[0] Trace unit enable bit */

	/* make sure everything completes before disabling */

	writel_relaxed(control, drvdata->base + TRCPRGCTLR);

	coresight_disclaim_device_unlocked(drvdata->base);

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
/*
 * etm4_disable_perf - disable the tracer at the end of a perf session,
 * recording whether the start/stop logic was active so it can be
 * restored when the process is scheduled back in.
 */
static int etm4_disable_perf(struct coresight_device *csdev,
			     struct perf_event *event)
	struct etm_filters *filters = event->hw.addr_filters;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))

	etm4_disable_hw(drvdata);

	/*
	 * Check if the start/stop logic was active when the unit was stopped.
	 * That way we can re-enable the start/stop logic when the process is
	 * scheduled again.  Configuration of the start/stop logic happens in
	 * function etm4_set_event_filters().
	 */
	control = readl_relaxed(drvdata->base + TRCVICTLR);
	/* TRCVICTLR::SSSTATUS, bit[9] */
	filters->ssstatus = (control & BIT(9));
/*
 * etm4_disable_sysfs - disable the tracer on behalf of the sysfs
 * interface, cross-calling the hardware teardown to the affine CPU.
 */
static void etm4_disable_sysfs(struct coresight_device *csdev)
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);

	spin_unlock(&drvdata->spinlock);

	dev_dbg(drvdata->dev, "ETM tracing disabled\n");
/*
 * etm4_disable - entry point from the coresight core.  Dispatches on the
 * mode recorded at enable time, then marks the tracer free for the next
 * user.
 */
static void etm4_disable(struct coresight_device *csdev,
			 struct perf_event *event)
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * For as long as the tracer isn't disabled another entity can't
	 * change its status.  As such we can read the status here without
	 * fearing it will change under us.
	 */
	mode = local_read(&drvdata->mode);

	case CS_MODE_DISABLED:
		etm4_disable_sysfs(csdev);
		etm4_disable_perf(csdev, event);

	/* Tracer is now free to be claimed by sysfs or perf. */
	local_set(&drvdata->mode, CS_MODE_DISABLED);
/* Source operations handed to the coresight core at registration time. */
static const struct coresight_ops_source etm4_source_ops = {
	.cpu_id		= etm4_cpu_id,
	.trace_id	= etm4_trace_id,
	.enable		= etm4_enable,
	.disable	= etm4_disable,
/* Top-level coresight ops: this device is a trace source only. */
static const struct coresight_ops etm4_cs_ops = {
	.source_ops	= &etm4_source_ops,
/*
 * etm4_init_arch_data - discover the capabilities of this trace unit.
 *
 * Runs once on the affine CPU at probe time and decodes the read-only ID
 * registers TRCIDR0..TRCIDR5 into the feature fields of etmv4_drvdata.
 * Bit positions follow the ETMv4 architecture specification.
 */
static void etm4_init_arch_data(void *info)
	struct etmv4_drvdata *drvdata = info;

	/* Make sure all registers are accessible */
	etm4_os_unlock(drvdata);

	CS_UNLOCK(drvdata->base);

	/* find all capabilities of the tracing unit */
	etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);

	/* INSTP0, bits[2:1] P0 tracing support field */
	if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
		drvdata->instrp0 = true;
		drvdata->instrp0 = false;

	/* TRCBB, bit[5] Branch broadcast tracing support bit */
	if (BMVAL(etmidr0, 5, 5))
		drvdata->trcbb = true;
		drvdata->trcbb = false;

	/* TRCCOND, bit[6] Conditional instruction tracing support bit */
	if (BMVAL(etmidr0, 6, 6))
		drvdata->trccond = true;
		drvdata->trccond = false;

	/* TRCCCI, bit[7] Cycle counting instruction bit */
	if (BMVAL(etmidr0, 7, 7))
		drvdata->trccci = true;
		drvdata->trccci = false;

	/* RETSTACK, bit[9] Return stack bit */
	if (BMVAL(etmidr0, 9, 9))
		drvdata->retstack = true;
		drvdata->retstack = false;

	/* NUMEVENT, bits[11:10] Number of events field */
	drvdata->nr_event = BMVAL(etmidr0, 10, 11);
	/* QSUPP, bits[16:15] Q element support field */
	drvdata->q_support = BMVAL(etmidr0, 15, 16);
	/* TSSIZE, bits[28:24] Global timestamp size field */
	drvdata->ts_size = BMVAL(etmidr0, 24, 28);

	/* base architecture of trace unit */
	etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
	/*
	 * TRCARCHMIN, bits[7:4] architecture the minor version number
	 * TRCARCHMAJ, bits[11:8] architecture major version number
	 */
	drvdata->arch = BMVAL(etmidr1, 4, 11);

	/* maximum size of resources */
	etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
	/* CIDSIZE, bits[9:5] Indicates the Context ID size */
	drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
	/* VMIDSIZE, bits[14:10] Indicates the VMID size */
	drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
	/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
	drvdata->ccsize = BMVAL(etmidr2, 25, 28);

	etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
	/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
	drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
	/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
	drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
	/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
	drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);

	/*
	 * TRCERR, bit[24] whether a trace unit can trace a
	 * system error exception.
	 */
	if (BMVAL(etmidr3, 24, 24))
		drvdata->trc_error = true;
		drvdata->trc_error = false;

	/* SYNCPR, bit[25] implementation has a fixed synchronization period? */
	if (BMVAL(etmidr3, 25, 25))
		drvdata->syncpr = true;
		drvdata->syncpr = false;

	/* STALLCTL, bit[26] is stall control implemented? */
	if (BMVAL(etmidr3, 26, 26))
		drvdata->stallctl = true;
		drvdata->stallctl = false;

	/* SYSSTALL, bit[27] implementation can support stall control? */
	if (BMVAL(etmidr3, 27, 27))
		drvdata->sysstall = true;
		drvdata->sysstall = false;

	/* NUMPROC, bits[30:28] the number of PEs available for tracing */
	drvdata->nr_pe = BMVAL(etmidr3, 28, 30);

	/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
	if (BMVAL(etmidr3, 31, 31))
		drvdata->nooverflow = true;
		drvdata->nooverflow = false;

	/* number of resources trace unit supports */
	etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
	/* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
	drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
	/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
	drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
	/*
	 * NUMRSPAIR, bits[19:16]
	 * The number of resource pairs conveyed by the HW starts at 0, i.e a
	 * value of 0x0 indicate 1 resource pair, 0x1 indicate two and so on.
	 * As such add 1 to the value of NUMRSPAIR for a better representation.
	 */
	drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
	/*
	 * NUMSSCC, bits[23:20] the number of single-shot
	 * comparator control for tracing
	 */
	drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
	/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
	drvdata->numcidc = BMVAL(etmidr4, 24, 27);
	/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
	drvdata->numvmidc = BMVAL(etmidr4, 28, 31);

	etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
	/* NUMEXTIN, bits[8:0] number of external inputs implemented */
	drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
	/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
	drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
	/* ATBTRIG, bit[22] implementation can support ATB triggers? */
	if (BMVAL(etmidr5, 22, 22))
		drvdata->atbtrig = true;
		drvdata->atbtrig = false;
	/*
	 * LPOVERRIDE, bit[23] implementation supports
	 * low-power state override
	 */
	if (BMVAL(etmidr5, 23, 23))
		drvdata->lpoverride = true;
		drvdata->lpoverride = false;
	/* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
	drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
	/* NUMCNTR, bits[30:28] number of counters available for tracing */
	drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
	CS_LOCK(drvdata->base);
612 static void etm4_set_default_config(struct etmv4_config *config)
614 /* disable all events tracing */
615 config->eventctrl0 = 0x0;
616 config->eventctrl1 = 0x0;
618 /* disable stalling */
619 config->stall_ctrl = 0x0;
621 /* enable trace synchronization every 4096 bytes, if available */
622 config->syncfreq = 0xC;
624 /* disable timestamp event */
625 config->ts_ctrl = 0x0;
627 /* TRCVICTLR::EVENT = 0x01, select the always on logic */
628 config->vinst_ctrl |= BIT(0);
/*
 * etm4_get_ns_access_type - build the non-secure exception level part of
 * an address comparator access type from the exclusion flags in
 * @config->mode, taking into account whether the kernel runs at EL2 (VHE).
 */
static u64 etm4_get_ns_access_type(struct etmv4_config *config)
	/*
	 * EXLEVEL_NS, bits[15:12]
	 * The Exception levels are:
	 *   Bit[12] Exception level 0 - Application
	 *   Bit[13] Exception level 1 - OS
	 *   Bit[14] Exception level 2 - Hypervisor
	 *   Bit[15] Never implemented
	 */
	if (!is_kernel_in_hyp_mode()) {
		/* Stay away from hypervisor mode for non-VHE */
		access_type =  ETM_EXLEVEL_NS_HYP;
		if (config->mode & ETM_MODE_EXCL_KERN)
			access_type |= ETM_EXLEVEL_NS_OS;
	} else if (config->mode & ETM_MODE_EXCL_KERN) {
		/* With VHE the kernel itself runs at EL2 */
		access_type = ETM_EXLEVEL_NS_HYP;

	if (config->mode & ETM_MODE_EXCL_USER)
		access_type |= ETM_EXLEVEL_NS_APP;
/*
 * etm4_get_access_type - full access type for an address comparator: the
 * non-secure exception levels plus the secure ones, which are always
 * excluded from tracing.
 */
static u64 etm4_get_access_type(struct etmv4_config *config)
	u64 access_type = etm4_get_ns_access_type(config);

	/*
	 * EXLEVEL_S, bits[11:8], don't trace anything happening
	 * in secure state
	 */
	access_type |= (ETM_EXLEVEL_S_APP	|
/*
 * etm4_set_comparator_filter - program address range comparator pair
 * @comparator/@comparator+1 to cover [@start, @stop] and include that
 * range in the ViewInst function.
 */
static void etm4_set_comparator_filter(struct etmv4_config *config,
				       u64 start, u64 stop, int comparator)
	u64 access_type = etm4_get_access_type(config);

	/* First half of default address comparator */
	config->addr_val[comparator] = start;
	config->addr_acc[comparator] = access_type;
	config->addr_type[comparator] = ETM_ADDR_TYPE_RANGE;

	/* Second half of default address comparator */
	config->addr_val[comparator + 1] = stop;
	config->addr_acc[comparator + 1] = access_type;
	config->addr_type[comparator + 1] = ETM_ADDR_TYPE_RANGE;

	/*
	 * Configure the ViewInst function to include this address range
	 * comparator.
	 *
	 * @comparator is divided by two since it is the index in the
	 * etmv4_config::addr_val array but register TRCVIIECTLR deals with
	 * address range comparator _pairs_.
	 *
	 * Therefore:
	 *	index 0 -> comparator pair 0
	 *	index 2 -> comparator pair 1
	 *	index 4 -> comparator pair 2
	 *	...
	 *	index 14 -> comparator pair 7
	 */
	config->viiectlr |= BIT(comparator / 2);
/*
 * etm4_set_start_stop_filter - program a single address comparator as a
 * trace start or stop trigger and wire it into the ViewInst start-stop
 * control register.
 */
static void etm4_set_start_stop_filter(struct etmv4_config *config,
				       u64 address, int comparator,
				       enum etm_addr_type type)
	u64 access_type = etm4_get_access_type(config);

	/* Configure the comparator */
	config->addr_val[comparator] = address;
	config->addr_acc[comparator] = access_type;
	config->addr_type[comparator] = type;

	/*
	 * Configure ViewInst Start-Stop control register.
	 * Addresses configured to start tracing go from bit 0 to n-1,
	 * while those configured to stop tracing from 16 to 16 + n-1.
	 */
	shift = (type == ETM_ADDR_TYPE_START ? 0 : 16);
	config->vissctlr |= BIT(shift + comparator);
/*
 * etm4_set_default_filter - trace everything: cover the whole address
 * space with range comparator pair 0 and start the start/stop logic in
 * the "started" state so tracing is active immediately.
 */
static void etm4_set_default_filter(struct etmv4_config *config)
	/*
	 * Configure address range comparator '0' to encompass all
	 * possible addresses.
	 */
	etm4_set_comparator_filter(config, start, stop,
				   ETM_DEFAULT_ADDR_COMP);

	/*
	 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
	 * in the started state
	 */
	config->vinst_ctrl |= BIT(9);

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;
/*
 * etm4_set_default - reset @config to the driver's default: default base
 * configuration followed by the trace-everything filter.
 */
static void etm4_set_default(struct etmv4_config *config)
	if (WARN_ON_ONCE(!config))

	/*
	 * Make default initialisation trace everything
	 *
	 * Select the "always true" resource selector on the
	 * "Enabling Event" line and configure address range comparator
	 * '0' to trace all the possible address range.  From there
	 * configure the "include/exclude" engine to include address
	 * range comparator '0'.
	 */
	etm4_set_default_config(config);
	etm4_set_default_filter(config);
/*
 * etm4_get_next_comparator - find a free address comparator suitable for
 * filter @type.  Range filters need a free _pair_ (even index); start or
 * stop filters can use any single free slot.  Returns the index of the
 * comparator, or an error once they are all in use.
 */
static int etm4_get_next_comparator(struct etmv4_drvdata *drvdata, u32 type)
	int nr_comparator, index = 0;
	struct etmv4_config *config = &drvdata->config;

	/*
	 * nr_addr_cmp holds the number of comparator _pair_, so times 2
	 * for the total number of comparators.
	 */
	nr_comparator = drvdata->nr_addr_cmp * 2;

	/* Go through the tally of comparators looking for a free one. */
	while (index < nr_comparator) {
		case ETM_ADDR_TYPE_RANGE:
			if (config->addr_type[index] == ETM_ADDR_TYPE_NONE &&
			    config->addr_type[index + 1] == ETM_ADDR_TYPE_NONE)

			/* Address range comparators go in pairs */
		case ETM_ADDR_TYPE_START:
		case ETM_ADDR_TYPE_STOP:
			if (config->addr_type[index] == ETM_ADDR_TYPE_NONE)

			/* Start/stop address can have odd indexes */

	/* If we are here all the comparators have been used. */
/*
 * etm4_set_event_filters - apply the address filters a perf session
 * specified to @drvdata->config.  With no filters the default
 * trace-everything configuration is used; otherwise each filter is given
 * a free comparator and programmed as a range or start/stop trigger.
 */
static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
				  struct perf_event *event)
	int i, comparator, ret = 0;
	struct etmv4_config *config = &drvdata->config;
	struct etm_filters *filters = event->hw.addr_filters;

	/* Sync events with what Perf got */
	perf_event_addr_filters_sync(event);

	/*
	 * If there are no filters to deal with simply go ahead with
	 * the default filter, i.e the entire address range.
	 */
	if (!filters->nr_filters)

	for (i = 0; i < filters->nr_filters; i++) {
		struct etm_filter *filter = &filters->etm_filter[i];
		enum etm_addr_type type = filter->type;

		/* See if a comparator is free. */
		comparator = etm4_get_next_comparator(drvdata, type);
		if (comparator < 0) {

		case ETM_ADDR_TYPE_RANGE:
			etm4_set_comparator_filter(config,

			/*
			 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
			 * in the started state
			 */
			config->vinst_ctrl |= BIT(9);

			/* No start-stop filtering for ViewInst */
			config->vissctlr = 0x0;
		case ETM_ADDR_TYPE_START:
		case ETM_ADDR_TYPE_STOP:
			/* Get the right start or stop address */
			address = (type == ETM_ADDR_TYPE_START ?

			/* Configure comparator */
			etm4_set_start_stop_filter(config, address,

			/*
			 * If filters::ssstatus == 1, trace acquisition was
			 * started but the process was yanked away before
			 * the stop address was hit.  As such the start/stop
			 * logic needs to be re-started so that tracing can
			 * resume where it left.
			 *
			 * The start/stop logic status when a process is
			 * scheduled out is checked in function
			 * etm4_disable_perf().
			 */
			if (filters->ssstatus)
				config->vinst_ctrl |= BIT(9);

			/* No include/exclude filtering for ViewInst */
			config->viiectlr = 0x0;

	/* Fall back to tracing the whole address space. */
	etm4_set_default_filter(config);
/*
 * etm4_config_trace_mode - refresh the exception level access type of
 * the default address comparator pair after the user changed the
 * kernel/user exclusion bits through sysfs.
 */
void etm4_config_trace_mode(struct etmv4_config *config)
	mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);

	/* excluding kernel AND user space doesn't make sense */
	WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER));

	/* nothing to do if neither flags are set */
	if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))

	addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
	/* clear default config */
	addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |

	addr_acc |= etm4_get_ns_access_type(config);

	/* Both halves of the default range comparator share the type. */
	config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
	config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
/*
 * CPU hotplug "online" callback: if boot-time tracing was requested and
 * has not already been latched for this CPU, enable the tracer now.
 */
static int etm4_online_cpu(unsigned int cpu)
	if (!etmdrvdata[cpu])

	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
		coresight_enable(etmdrvdata[cpu]->csdev);
/*
 * CPU hotplug STARTING callback (runs on the hotplugged CPU): re-release
 * the OS Lock and, if the tracer was enabled before the CPU went down,
 * reprogram the hardware.
 */
static int etm4_starting_cpu(unsigned int cpu)
	if (!etmdrvdata[cpu])

	spin_lock(&etmdrvdata[cpu]->spinlock);
	if (!etmdrvdata[cpu]->os_unlock) {
		etm4_os_unlock(etmdrvdata[cpu]);
		etmdrvdata[cpu]->os_unlock = true;

	if (local_read(&etmdrvdata[cpu]->mode))
		etm4_enable_hw(etmdrvdata[cpu]);
	spin_unlock(&etmdrvdata[cpu]->spinlock);
/*
 * CPU hotplug DYING callback (runs on the CPU going down): turn the
 * trace unit off while the CPU is still powered.
 */
static int etm4_dying_cpu(unsigned int cpu)
	if (!etmdrvdata[cpu])

	spin_lock(&etmdrvdata[cpu]->spinlock);
	if (local_read(&etmdrvdata[cpu]->mode))
		etm4_disable_hw(etmdrvdata[cpu]);
	spin_unlock(&etmdrvdata[cpu]->spinlock);
/* Ask the coresight core for the trace ID associated with this CPU. */
static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
	drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
/*
 * etm4_probe - AMBA probe: map the device, discover its capabilities on
 * the affine CPU, install the hotplug callbacks for the first device,
 * register with the coresight core and hook up the perf symlink.
 * Honours the boot_enable module parameter by starting tracing directly.
 */
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etmv4_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };
	struct device_node *np = adev->dev.of_node;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);

	pdata = of_get_coresight_platform_data(dev, np);
		return PTR_ERR(pdata);
	adev->dev.platform_data = pdata;

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	/* assumes pdata->cpu identifies the affine CPU — set by DT parsing */
	drvdata->cpu = pdata ? pdata->cpu : 0;

	etmdrvdata[drvdata->cpu] = drvdata;

	if (smp_call_function_single(drvdata->cpu,
				etm4_init_arch_data,  drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	/* First device: install the CPU hotplug callbacks. */
	if (!etm4_count++) {
		cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
						     "arm/coresight4:starting",
						     etm4_starting_cpu, etm4_dying_cpu);
		ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
							   "arm/coresight4:online",
							   etm4_online_cpu, NULL);
			goto err_arch_supported;

	if (etm4_arch_supported(drvdata->arch) == false) {
		goto err_arch_supported;

	etm4_init_trace_id(drvdata);
	etm4_set_default(&drvdata->config);

	desc.type = CORESIGHT_DEV_TYPE_SOURCE;
	desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc.ops = &etm4_cs_ops;
	desc.groups = coresight_etmv4_groups;
	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_arch_supported;

	ret = etm_perf_symlink(drvdata->csdev, true);
		coresight_unregister(drvdata->csdev);
		goto err_arch_supported;

	pm_runtime_put(&adev->dev);
	dev_info(dev, "CPU%d: ETM v%d.%d initialized\n",
		 drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);

		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;

	/*
	 * NOTE(review): error-unwind tail (label not visible in this
	 * extract): remove the hotplug states when the last device goes.
	 */
	if (--etm4_count == 0) {
		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
			cpuhp_remove_state_nocalls(hp_online);
/* One amba_id entry per part; the mask matches the full part number. */
#define ETM4x_AMBA_ID(pid)			\
		.mask	= 0x000fffff,		\

static const struct amba_id etm4_ids[] = {
	ETM4x_AMBA_ID(0x000bb95d),		/* Cortex-A53 */
	ETM4x_AMBA_ID(0x000bb95e),		/* Cortex-A57 */
	ETM4x_AMBA_ID(0x000bb95a),		/* Cortex-A72 */
	ETM4x_AMBA_ID(0x000bb959),		/* Cortex-A73 */
	ETM4x_AMBA_ID(0x000bb9da),		/* Cortex-A35 */
/* AMBA driver glue; built in only — unbinding via sysfs is suppressed. */
static struct amba_driver etm4x_driver = {
		.name   = "coresight-etm4x",
		.suppress_bind_attrs = true,
	.probe		= etm4_probe,
	.id_table	= etm4_ids,
builtin_amba_driver(etm4x_driver);