]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/hwtracing/coresight/coresight-tmc-etf.c
Merge tag 'mailbox-v5.1' of git://git.linaro.org/landing-teams/working/fujitsu/integr...
[linux.git] / drivers / hwtracing / coresight / coresight-tmc-etf.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(C) 2016 Linaro Limited. All rights reserved.
4  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5  */
6
7 #include <linux/circ_buf.h>
8 #include <linux/coresight.h>
9 #include <linux/perf_event.h>
10 #include <linux/slab.h>
11 #include "coresight-priv.h"
12 #include "coresight-tmc.h"
13 #include "coresight-etm-perf.h"
14
15 static int tmc_set_etf_buffer(struct coresight_device *csdev,
16                               struct perf_output_handle *handle);
17
/*
 * __tmc_etb_enable_hw - program and start the TMC in circular buffer
 * (ETB) mode.  All call sites in this file hold drvdata->spinlock; the
 * claim tag is handled by the tmc_etb_enable_hw() wrapper.
 */
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	/*
	 * Formatter/flush control: enable formatting (EN_FMT) and trigger
	 * insertion (EN_TI), and arm flush-on-FlIn, flush-on-trigger-event
	 * and trigger-on-TrigIn conditions.
	 */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	/* Amount of trace to capture after a trigger, from sysFS */
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
36
37 static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
38 {
39         int rc = coresight_claim_device(drvdata->base);
40
41         if (rc)
42                 return rc;
43
44         __tmc_etb_enable_hw(drvdata);
45         return 0;
46 }
47
48 static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
49 {
50         char *bufp;
51         u32 read_data, lost;
52
53         /* Check if the buffer wrapped around. */
54         lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
55         bufp = drvdata->buf;
56         drvdata->len = 0;
57         while (1) {
58                 read_data = readl_relaxed(drvdata->base + TMC_RRD);
59                 if (read_data == 0xFFFFFFFF)
60                         break;
61                 memcpy(bufp, &read_data, 4);
62                 bufp += 4;
63                 drvdata->len += 4;
64         }
65
66         if (lost)
67                 coresight_insert_barrier_packet(drvdata->buf);
68         return;
69 }
70
/*
 * __tmc_etb_disable_hw - flush and stop the TMC.  Call sites hold
 * drvdata->spinlock; the claim tag is released by the
 * tmc_etb_disable_hw() wrapper.
 */
static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
86
/*
 * tmc_etb_disable_hw - stop the TMC and release the claim tag taken in
 * tmc_etb_enable_hw().
 */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->base);
}
92
/*
 * __tmc_etf_enable_hw - program and start the TMC in hardware FIFO
 * (link) mode, where trace passes through rather than being captured.
 * Caller holds drvdata->spinlock; the claim tag is handled by the
 * tmc_etf_enable_hw() wrapper.
 */
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	/* Only formatting and trigger insertion - no flush conditions */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	/* Clear the buffer level watermark */
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
108
109 static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
110 {
111         int rc = coresight_claim_device(drvdata->base);
112
113         if (rc)
114                 return rc;
115
116         __tmc_etf_enable_hw(drvdata);
117         return 0;
118 }
119
/*
 * tmc_etf_disable_hw - flush and stop a TMC operating in HW FIFO mode,
 * then release the claim tag taken in tmc_etf_enable_hw().  The
 * _unlocked disclaim variant is used because the device registers are
 * already CS_UNLOCK()ed at that point.
 */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(drvdata->base);
	CS_LOCK(drvdata->base);
}
129
130 /*
131  * Return the available trace data in the buffer from @pos, with
132  * a maximum limit of @len, updating the @bufpp on where to
133  * find it.
134  */
135 ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
136                                 loff_t pos, size_t len, char **bufpp)
137 {
138         ssize_t actual = len;
139
140         /* Adjust the len to available size @pos */
141         if (pos + actual > drvdata->len)
142                 actual = drvdata->len - pos;
143         if (actual > 0)
144                 *bufpp = drvdata->buf + pos;
145         return actual;
146 }
147
148 static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
149 {
150         int ret = 0;
151         bool used = false;
152         char *buf = NULL;
153         unsigned long flags;
154         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
155
156         /*
157          * If we don't have a buffer release the lock and allocate memory.
158          * Otherwise keep the lock and move along.
159          */
160         spin_lock_irqsave(&drvdata->spinlock, flags);
161         if (!drvdata->buf) {
162                 spin_unlock_irqrestore(&drvdata->spinlock, flags);
163
164                 /* Allocating the memory here while outside of the spinlock */
165                 buf = kzalloc(drvdata->size, GFP_KERNEL);
166                 if (!buf)
167                         return -ENOMEM;
168
169                 /* Let's try again */
170                 spin_lock_irqsave(&drvdata->spinlock, flags);
171         }
172
173         if (drvdata->reading) {
174                 ret = -EBUSY;
175                 goto out;
176         }
177
178         /*
179          * In sysFS mode we can have multiple writers per sink.  Since this
180          * sink is already enabled no memory is needed and the HW need not be
181          * touched.
182          */
183         if (drvdata->mode == CS_MODE_SYSFS)
184                 goto out;
185
186         /*
187          * If drvdata::buf isn't NULL, memory was allocated for a previous
188          * trace run but wasn't read.  If so simply zero-out the memory.
189          * Otherwise use the memory allocated above.
190          *
191          * The memory is freed when users read the buffer using the
192          * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
193          * details.
194          */
195         if (drvdata->buf) {
196                 memset(drvdata->buf, 0, drvdata->size);
197         } else {
198                 used = true;
199                 drvdata->buf = buf;
200         }
201
202         ret = tmc_etb_enable_hw(drvdata);
203         if (!ret)
204                 drvdata->mode = CS_MODE_SYSFS;
205         else
206                 /* Free up the buffer if we failed to enable */
207                 used = false;
208 out:
209         spin_unlock_irqrestore(&drvdata->spinlock, flags);
210
211         /* Free memory outside the spinlock if need be */
212         if (!used)
213                 kfree(buf);
214
215         return ret;
216 }
217
/*
 * tmc_enable_etf_sink_perf - enable the ETB/ETF as a sink for a perf
 * session.  @data carries the perf_output_handle describing the AUX
 * ring buffer for the session.
 *
 * Returns 0 on success, -EINVAL if the sink is busy (being read, or
 * already enabled by perf or sysFS), or the error from
 * tmc_set_etf_buffer()/tmc_etb_enable_hw().
 */
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	/* do/while(0) lets 'break' bail out to the common unlock below */
	do {
		ret = -EINVAL;
		/* Don't touch the hardware while a user is reading it */
		if (drvdata->reading)
			break;
		/*
		 * In Perf mode there can be only one writer per sink.  There
		 * is also no need to continue if the ETB/ETF is already
		 * operated from sysFS.
		 */
		if (drvdata->mode != CS_MODE_DISABLED)
			break;

		/* Compute the AUX page/offset to write to before starting */
		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;
		ret  = tmc_etb_enable_hw(drvdata);
		if (!ret)
			drvdata->mode = CS_MODE_PERF;
	} while (0);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
249
250 static int tmc_enable_etf_sink(struct coresight_device *csdev,
251                                u32 mode, void *data)
252 {
253         int ret;
254         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
255
256         switch (mode) {
257         case CS_MODE_SYSFS:
258                 ret = tmc_enable_etf_sink_sysfs(csdev);
259                 break;
260         case CS_MODE_PERF:
261                 ret = tmc_enable_etf_sink_perf(csdev, data);
262                 break;
263         /* We shouldn't be here */
264         default:
265                 ret = -EINVAL;
266                 break;
267         }
268
269         if (ret)
270                 return ret;
271
272         dev_dbg(drvdata->dev, "TMC-ETB/ETF enabled\n");
273         return 0;
274 }
275
276 static void tmc_disable_etf_sink(struct coresight_device *csdev)
277 {
278         unsigned long flags;
279         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
280
281         spin_lock_irqsave(&drvdata->spinlock, flags);
282         if (drvdata->reading) {
283                 spin_unlock_irqrestore(&drvdata->spinlock, flags);
284                 return;
285         }
286
287         /* Disable the TMC only if it needs to */
288         if (drvdata->mode != CS_MODE_DISABLED) {
289                 tmc_etb_disable_hw(drvdata);
290                 drvdata->mode = CS_MODE_DISABLED;
291         }
292
293         spin_unlock_irqrestore(&drvdata->spinlock, flags);
294
295         dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n");
296 }
297
298 static int tmc_enable_etf_link(struct coresight_device *csdev,
299                                int inport, int outport)
300 {
301         int ret;
302         unsigned long flags;
303         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
304
305         spin_lock_irqsave(&drvdata->spinlock, flags);
306         if (drvdata->reading) {
307                 spin_unlock_irqrestore(&drvdata->spinlock, flags);
308                 return -EBUSY;
309         }
310
311         ret = tmc_etf_enable_hw(drvdata);
312         if (!ret)
313                 drvdata->mode = CS_MODE_SYSFS;
314         spin_unlock_irqrestore(&drvdata->spinlock, flags);
315
316         if (!ret)
317                 dev_dbg(drvdata->dev, "TMC-ETF enabled\n");
318         return ret;
319 }
320
321 static void tmc_disable_etf_link(struct coresight_device *csdev,
322                                  int inport, int outport)
323 {
324         unsigned long flags;
325         struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
326
327         spin_lock_irqsave(&drvdata->spinlock, flags);
328         if (drvdata->reading) {
329                 spin_unlock_irqrestore(&drvdata->spinlock, flags);
330                 return;
331         }
332
333         tmc_etf_disable_hw(drvdata);
334         drvdata->mode = CS_MODE_DISABLED;
335         spin_unlock_irqrestore(&drvdata->spinlock, flags);
336
337         dev_dbg(drvdata->dev, "TMC-ETF disabled\n");
338 }
339
340 static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
341                                   void **pages, int nr_pages, bool overwrite)
342 {
343         int node;
344         struct cs_buffers *buf;
345
346         if (cpu == -1)
347                 cpu = smp_processor_id();
348         node = cpu_to_node(cpu);
349
350         /* Allocate memory structure for interaction with Perf */
351         buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
352         if (!buf)
353                 return NULL;
354
355         buf->snapshot = overwrite;
356         buf->nr_pages = nr_pages;
357         buf->data_pages = pages;
358
359         return buf;
360 }
361
/*
 * tmc_free_etf_buffer - sink_ops::free_buffer entry point.  Releases
 * the cs_buffers handle returned by tmc_alloc_etf_buffer().
 */
static void tmc_free_etf_buffer(void *config)
{
	kfree(config);
}
368
369 static int tmc_set_etf_buffer(struct coresight_device *csdev,
370                               struct perf_output_handle *handle)
371 {
372         int ret = 0;
373         unsigned long head;
374         struct cs_buffers *buf = etm_perf_sink_config(handle);
375
376         if (!buf)
377                 return -EINVAL;
378
379         /* wrap head around to the amount of space we have */
380         head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
381
382         /* find the page to write to */
383         buf->cur = head / PAGE_SIZE;
384
385         /* and offset within that page */
386         buf->offset = head % PAGE_SIZE;
387
388         local_set(&buf->data_size, 0);
389
390         return ret;
391 }
392
/*
 * tmc_update_etf_buffer - sink_ops::update_buffer entry point: drain
 * the TMC internal RAM into the perf AUX pages tracked by @sink_config.
 *
 * Flushes and stops the TMC, then copies out trace data one word at a
 * time via the RRD register.  If more trace was captured than fits in
 * the AUX buffer, the RAM read pointer is advanced so that only the
 * most recent data is kept and the output is flagged as truncated.
 *
 * Returns the number of bytes made available to user space.
 */
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return 0;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	/* The RAM read and write pointers delimit the captured trace */
	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 4);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 5);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		/* Everything between the old and new RRP is dropped */
		lost = true;
	}

	if (lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = barrier_pkt;

	/* for every byte to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		/*
		 * On a wrap, overwrite the leading words with the barrier
		 * packet, mirroring what coresight_insert_barrier_packet()
		 * does on the sysFS path.
		 */
		if (lost && *barrier) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/* In snapshot mode we have to update the head */
	if (buf->snapshot) {
		handle->head = (cur * PAGE_SIZE) + offset;
		to_read = buf->nr_pages << PAGE_SHIFT;
	}
	CS_LOCK(drvdata->base);

	return to_read;
}
510
/* Sink operations, shared by the ETB and ETF configurations */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

/* Link (HW FIFO) operations, only registered for ETF configurations */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

/* An ETB can only act as a sink */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

/* An ETF can act either as a sink or as a link in the trace path */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};
532
/*
 * tmc_read_prepare_etb - get the ETB/ETF ready for a buffer read via
 * the /dev/xyz.{etb|etf} interface.  If a sysFS session is running, the
 * TMC is stopped (which dumps its RAM to drvdata->buf, see
 * __tmc_etb_disable_hw()) so the buffer stays stable while it is read.
 *
 * Returns 0 on success, -EBUSY if a read is already in progress, or
 * -EINVAL if the TMC is in a state that can't be read (HW FIFO mode,
 * driven by perf, or nothing left to read).
 */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		__tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
580
/*
 * tmc_read_unprepare_etb - undo tmc_read_prepare_etb() once the user is
 * done reading the buffer.  If a sysFS session is still active the
 * trace buffer is zeroed and the TMC restarted; otherwise the buffer is
 * released.
 *
 * Returns 0 on success or -EINVAL if the TMC is in HW FIFO mode.
 */
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		__tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}