1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2016 Linaro Limited. All rights reserved.
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
7 #include <linux/coresight.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/iommu.h>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
12 #include "coresight-catu.h"
13 #include "coresight-etm-perf.h"
14 #include "coresight-priv.h"
15 #include "coresight-tmc.h"
25 * etr_perf_buffer - Perf buffer used for ETR
26 * @etr_buf - Actual buffer used by the ETR
27 * @snapshot - Perf session mode
28 * @head - handle->head at the beginning of the session.
29 * @nr_pages - Number of pages in the ring buffer.
30 * @pages - Array of Pages in the ring buffer.
32 struct etr_perf_buffer {
33 struct etr_buf *etr_buf;
40 /* Convert the perf index to an offset within the perf ring buffer */
41 #define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
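/*
 * Illustrative example (not part of the driver): PERF_IDX2OFF() simply wraps
 * the free-running handle->head counter into the perf ring buffer. With an
 * 8-page ring buffer and 4K pages, a head of 0x9010 maps to offset 0x1010:
 *
 *	0x9010 % (8 << PAGE_SHIFT) == 0x9010 % 0x8000 == 0x1010
 */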
43 /* Lower limit for ETR hardware buffer */
44 #define TMC_ETR_PERF_MIN_BUF_SIZE SZ_1M
47 * The TMC ETR SG has a page size of 4K. The SG table contains pointers
48 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
49 * 4K (e.g., 16KB or 64KB). This implies that a single OS page could
50 * contain more than one SG buffer or table.
52 * A table entry has the following format:
54 * ---Bit31------------Bit4-------Bit1-----Bit0--
55 * | Address[39:12] | SBZ | Entry Type |
56 * ----------------------------------------------
58 * Address: Bits [39:12] of a physical page address. Bits [11:0] are
59 * always 0.
61 * Entry type:
62 * b00 - Reserved.
63 * b01 - Last entry in the tables, points to 4K page buffer.
64 * b10 - Normal entry, points to 4K page buffer.
65 * b11 - Link. The address points to the base of next table.
70 #define ETR_SG_PAGE_SHIFT 12
71 #define ETR_SG_PAGE_SIZE (1UL << ETR_SG_PAGE_SHIFT)
72 #define ETR_SG_PAGES_PER_SYSPAGE (PAGE_SIZE / ETR_SG_PAGE_SIZE)
73 #define ETR_SG_PTRS_PER_PAGE (ETR_SG_PAGE_SIZE / sizeof(sgte_t))
74 #define ETR_SG_PTRS_PER_SYSPAGE (PAGE_SIZE / sizeof(sgte_t))
76 #define ETR_SG_ET_MASK 0x3
77 #define ETR_SG_ET_LAST 0x1
78 #define ETR_SG_ET_NORMAL 0x2
79 #define ETR_SG_ET_LINK 0x3
81 #define ETR_SG_ADDR_SHIFT 4
83 #define ETR_SG_ENTRY(addr, type) \
84 (sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
85 (type & ETR_SG_ET_MASK))
87 #define ETR_SG_ADDR(entry) \
88 (((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
89 #define ETR_SG_ET(entry) ((entry) & ETR_SG_ET_MASK)
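/*
 * Illustrative example (not part of the driver): encoding and decoding a
 * normal entry for a hypothetical 4K buffer at physical address 0x80001000.
 * ETR_SG_ENTRY() stores bits [39:12] of the address shifted up by
 * ETR_SG_ADDR_SHIFT, with the entry type in bits [1:0]:
 *
 *	sgte_t entry = ETR_SG_ENTRY(0x80001000ULL, ETR_SG_ET_NORMAL);
 *
 *	entry              == ((0x80001000ULL >> 12) << 4) | 0x2 == 0x800012
 *	ETR_SG_ADDR(entry) == 0x80001000
 *	ETR_SG_ET(entry)   == ETR_SG_ET_NORMAL
 */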
92 * struct etr_sg_table : ETR SG Table
93 * @sg_table: Generic SG Table holding the data/table pages.
94 * @hwaddr: DMA address used by the TMC, which is the base
95 * address of the table.
98 struct tmc_sg_table *sg_table;
103 * tmc_etr_sg_table_entries: Total number of table entries required to map
104 * @nr_pages system pages.
106 * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
107 * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
108 * with the last entry pointing to another page of table entries.
109 * If we spill over to a new page for mapping 1 entry, we could as
110 * well replace the link entry of the previous page with the last entry.
112 static inline unsigned long __attribute_const__
113 tmc_etr_sg_table_entries(int nr_pages)
115 unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
116 unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
118 * If we spill over to a new page for 1 entry, we could as well
119 * make it the LAST entry in the previous page, skipping the Link entry.
122 if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
123 nr_sglinks--;
124 return nr_sgpages + nr_sglinks;
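/*
 * Worked example (illustrative, assuming 4K PAGE_SIZE and a 4-byte sgte_t,
 * i.e. ETR_SG_PTRS_PER_PAGE == 1024): mapping nr_pages == 1024 gives
 * nr_sgpages == 1024 and nr_sglinks == 1024 / 1023 == 1. The remainder is
 * 1 (i.e. < 2), so the link is dropped and the spilled pointer becomes the
 * LAST entry of the first table page: 1024 entries in exactly one table page.
 */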
128 * tmc_pages_get_offset: Go through all the pages in the tmc_pages
129 * and map the device address @addr to an offset within the virtual range.
133 tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
136 dma_addr_t page_start;
138 for (i = 0; i < tmc_pages->nr_pages; i++) {
139 page_start = tmc_pages->daddrs[i];
140 if (addr >= page_start && addr < (page_start + PAGE_SIZE))
141 return i * PAGE_SIZE + (addr - page_start);
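/*
 * Example (illustrative): if daddrs[3] == 0x80000000 then a device address
 * of 0x80000040 maps to offset 3 * PAGE_SIZE + 0x40 within the vmap'ed
 * buffer.
 */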
148 * tmc_pages_free : Unmap and free the pages used by tmc_pages.
149 * If the pages were not allocated in tmc_pages_alloc(), we would
150 * simply drop the refcount.
152 static void tmc_pages_free(struct tmc_pages *tmc_pages,
153 struct device *dev, enum dma_data_direction dir)
157 for (i = 0; i < tmc_pages->nr_pages; i++) {
158 if (tmc_pages->daddrs && tmc_pages->daddrs[i])
159 dma_unmap_page(dev, tmc_pages->daddrs[i],
161 if (tmc_pages->pages && tmc_pages->pages[i])
162 __free_page(tmc_pages->pages[i]);
165 kfree(tmc_pages->pages);
166 kfree(tmc_pages->daddrs);
167 tmc_pages->pages = NULL;
168 tmc_pages->daddrs = NULL;
169 tmc_pages->nr_pages = 0;
173 * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
174 * If @pages is not NULL, the list of page virtual addresses is
175 * used as the data pages. The pages are then dma_map'ed for @dev
176 * with dma_direction @dir.
178 * Returns 0 upon success, else the error number.
180 static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
181 struct device *dev, int node,
182 enum dma_data_direction dir, void **pages)
188 nr_pages = tmc_pages->nr_pages;
189 tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
191 if (!tmc_pages->daddrs)
193 tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
195 if (!tmc_pages->pages) {
196 kfree(tmc_pages->daddrs);
197 tmc_pages->daddrs = NULL;
201 for (i = 0; i < nr_pages; i++) {
202 if (pages && pages[i]) {
203 page = virt_to_page(pages[i]);
204 /* Hold a refcount on the page */
205 get_page(page);
206 } else {
207 page = alloc_pages_node(node,
208 GFP_KERNEL | __GFP_ZERO, 0);
210 paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
211 if (dma_mapping_error(dev, paddr))
213 tmc_pages->daddrs[i] = paddr;
214 tmc_pages->pages[i] = page;
218 tmc_pages_free(tmc_pages, dev, dir);
223 tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
225 return tmc_pages_get_offset(&sg_table->data_pages, addr);
228 static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
230 if (sg_table->table_vaddr)
231 vunmap(sg_table->table_vaddr);
232 tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
235 static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
237 if (sg_table->data_vaddr)
238 vunmap(sg_table->data_vaddr);
239 tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
242 void tmc_free_sg_table(struct tmc_sg_table *sg_table)
244 tmc_free_table_pages(sg_table);
245 tmc_free_data_pages(sg_table);
249 * Allocate pages for the table. Since this will be used by the device,
250 * allocate the pages closer to the device (i.e., dev_to_node(dev)
251 * rather than the CPU node).
253 static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
256 struct tmc_pages *table_pages = &sg_table->table_pages;
258 rc = tmc_pages_alloc(table_pages, sg_table->dev,
259 dev_to_node(sg_table->dev),
260 DMA_TO_DEVICE, NULL);
263 sg_table->table_vaddr = vmap(table_pages->pages,
264 table_pages->nr_pages,
267 if (!sg_table->table_vaddr)
270 sg_table->table_daddr = table_pages->daddrs[0];
274 static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
278 /* Allocate data pages on the node requested by the caller */
279 rc = tmc_pages_alloc(&sg_table->data_pages,
280 sg_table->dev, sg_table->node,
281 DMA_FROM_DEVICE, pages);
283 sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
284 sg_table->data_pages.nr_pages,
287 if (!sg_table->data_vaddr)
294 * tmc_alloc_sg_table: Allocate and set up DMA pages for the TMC SG table
295 * and data buffers. TMC writes to the data buffers and reads from the SG
296 * table pages.
298 * @dev - Device to which the pages should be DMA mapped.
299 * @node - Numa node for mem allocations
300 * @nr_tpages - Number of pages for the table entries.
301 * @nr_dpages - Number of pages for Data buffer.
302 * @pages - Optional list of virtual addresses of the pages.
304 struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
311 struct tmc_sg_table *sg_table;
313 sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
315 return ERR_PTR(-ENOMEM);
316 sg_table->data_pages.nr_pages = nr_dpages;
317 sg_table->table_pages.nr_pages = nr_tpages;
318 sg_table->node = node;
321 rc = tmc_alloc_data_pages(sg_table, pages);
323 rc = tmc_alloc_table_pages(sg_table);
325 tmc_free_sg_table(sg_table);
334 * tmc_sg_table_sync_data_range: Sync the data buffer written
335 * by the device, from @offset up to @size bytes.
337 void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
338 u64 offset, u64 size)
341 int npages = DIV_ROUND_UP(size, PAGE_SIZE);
342 struct device *dev = table->dev;
343 struct tmc_pages *data = &table->data_pages;
345 start = offset >> PAGE_SHIFT;
346 for (i = start; i < (start + npages); i++) {
347 index = i % data->nr_pages;
348 dma_sync_single_for_cpu(dev, data->daddrs[index],
349 PAGE_SIZE, DMA_FROM_DEVICE);
353 /* tmc_sg_table_sync_table: Sync the page table */
354 void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
357 struct device *dev = sg_table->dev;
358 struct tmc_pages *table_pages = &sg_table->table_pages;
360 for (i = 0; i < table_pages->nr_pages; i++)
361 dma_sync_single_for_device(dev, table_pages->daddrs[i],
362 PAGE_SIZE, DMA_TO_DEVICE);
366 * tmc_sg_table_get_data: Get the buffer pointer for the data at @offset
367 * in the SG buffer. The @bufpp is updated to point to the buffer.
369 * Returns: the length of linear data available at @offset, or
371 * <= 0 if no data is available.
373 ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
374 u64 offset, size_t len, char **bufpp)
377 int pg_idx = offset >> PAGE_SHIFT;
378 int pg_offset = offset & (PAGE_SIZE - 1);
379 struct tmc_pages *data_pages = &sg_table->data_pages;
381 size = tmc_sg_table_buf_size(sg_table);
385 /* Make sure we don't go beyond the end */
386 len = (len < (size - offset)) ? len : size - offset;
387 /* Respect the page boundaries */
388 len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
390 *bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
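/*
 * Example (illustrative, 4K pages): offset == 0x1f80 gives pg_idx == 1 and
 * pg_offset == 0xf80, so at most 0x80 bytes are returned here; the caller
 * must call again for the data that continues in the next page.
 */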
395 /* Map a dma address to virtual address */
397 tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
398 dma_addr_t addr, bool table)
402 struct tmc_pages *tmc_pages;
405 tmc_pages = &sg_table->table_pages;
406 base = (unsigned long)sg_table->table_vaddr;
408 tmc_pages = &sg_table->data_pages;
409 base = (unsigned long)sg_table->data_vaddr;
412 offset = tmc_pages_get_offset(tmc_pages, addr);
415 return base + offset;
418 /* Dump the given sg_table */
419 static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
424 struct tmc_sg_table *sg_table = etr_table->sg_table;
426 ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
427 etr_table->hwaddr, true);
429 addr = ETR_SG_ADDR(*ptr);
430 switch (ETR_SG_ET(*ptr)) {
431 case ETR_SG_ET_NORMAL:
432 dev_dbg(sg_table->dev,
433 "%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
437 dev_dbg(sg_table->dev,
438 "%05d: *** %p\t:{L} 0x%llx ***\n",
440 ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
444 dev_dbg(sg_table->dev,
445 "%05d: ### %p\t:[L] 0x%llx ###\n",
449 dev_dbg(sg_table->dev,
450 "%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
456 dev_dbg(sg_table->dev, "******* End of Table *****\n");
459 static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
463 * Populate the SG table entries from the allocated table/data pages.
464 * Each system data page holds ETR_SG_PAGES_PER_SYSPAGE SG pages, and so
465 * does a table page. So we keep track of the indices of the tables
466 * in each system page and move the pointers accordingly.
468 #define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
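/*
 * E.g. with size == 4, INC_IDX_ROUND() steps idx 0 -> 1 -> 2 -> 3 -> 0 and
 * evaluates to the new index, so it is 0 (false) exactly when the index
 * wraps, which the loop below uses to advance to the next system page.
 */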
469 static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
472 int i, type, nr_entries;
473 int tpidx = 0; /* index to the current system table_page */
474 int sgtidx = 0; /* index to the sg_table within the current syspage */
475 int sgtentry = 0; /* the entry within the sg_table */
476 int dpidx = 0; /* index to the current system data_page */
477 int spidx = 0; /* index to the SG page within the current data page */
478 sgte_t *ptr; /* pointer to the table entry to fill */
479 struct tmc_sg_table *sg_table = etr_table->sg_table;
480 dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
481 dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;
483 nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
485 * Use the contiguous virtual address of the table to update entries.
487 ptr = sg_table->table_vaddr;
489 * Fill all the entries, except the last entry to avoid special
490 * checks within the loop.
492 for (i = 0; i < nr_entries - 1; i++) {
493 if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
495 * Last entry in a sg_table page is a link address to
496 * the next table page. If this sg_table is the last
497 * one in the system page, it links to the first
498 * sg_table in the next system page. Otherwise, it
499 * links to the next sg_table page within the system page.
502 if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
503 paddr = table_daddrs[tpidx + 1];
504 } else {
505 paddr = table_daddrs[tpidx] +
506 (ETR_SG_PAGE_SIZE * (sgtidx + 1));
507 }
508 type = ETR_SG_ET_LINK;
511 * Update the indices to the data_pages to point to the
512 * next sg_page in the data buffer.
514 type = ETR_SG_ET_NORMAL;
515 paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
516 if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
517 dpidx++;
519 *ptr++ = ETR_SG_ENTRY(paddr, type);
521 * Move to the next table pointer, moving the table page index if necessary.
524 if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
525 if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
526 tpidx++;
527 }
528 }
530 /* Set up the last entry, which is always a data pointer */
531 paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
532 *ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
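/*
 * Illustrative layout (assuming 4K PAGE_SIZE and a 4-byte sgte_t): a 5-page
 * data buffer needs 5 entries in a single table page:
 *
 *	entry 0..3: [N] normal entry -> data pages 0..3
 *	entry 4   : [L] last entry   -> data page 4
 *
 * Larger buffers chain table pages, with a {L} link entry as the final
 * pointer of every full table page.
 */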
536 * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, data buffer of @size and
537 * populate the table.
539 * @dev - Device pointer for the TMC
540 * @node - NUMA node where the memory should be allocated
541 * @size - Total size of the data buffer
542 * @pages - Optional list of page virtual addresses.
544 static struct etr_sg_table *
545 tmc_init_etr_sg_table(struct device *dev, int node,
546 unsigned long size, void **pages)
548 int nr_entries, nr_tpages;
549 int nr_dpages = size >> PAGE_SHIFT;
550 struct tmc_sg_table *sg_table;
551 struct etr_sg_table *etr_table;
553 etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
555 return ERR_PTR(-ENOMEM);
556 nr_entries = tmc_etr_sg_table_entries(nr_dpages);
557 nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);
559 sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
560 if (IS_ERR(sg_table)) {
562 return ERR_CAST(sg_table);
565 etr_table->sg_table = sg_table;
566 /* TMC should use table base address for DBA */
567 etr_table->hwaddr = sg_table->table_daddr;
568 tmc_etr_sg_table_populate(etr_table);
569 /* Sync the table pages for the HW */
570 tmc_sg_table_sync_table(sg_table);
571 tmc_etr_sg_table_dump(etr_table);
577 * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
579 static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
580 struct etr_buf *etr_buf, int node,
583 struct etr_flat_buf *flat_buf;
585 /* We cannot reuse existing pages for flat buf */
589 flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
593 flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
594 &flat_buf->daddr, GFP_KERNEL);
595 if (!flat_buf->vaddr) {
600 flat_buf->size = etr_buf->size;
601 flat_buf->dev = drvdata->dev;
602 etr_buf->hwaddr = flat_buf->daddr;
603 etr_buf->mode = ETR_MODE_FLAT;
604 etr_buf->private = flat_buf;
608 static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
610 struct etr_flat_buf *flat_buf = etr_buf->private;
612 if (flat_buf && flat_buf->daddr)
613 dma_free_coherent(flat_buf->dev, flat_buf->size,
614 flat_buf->vaddr, flat_buf->daddr);
618 static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
621 * Adjust the buffer to point to the beginning of the trace data
622 * and update the available trace data.
624 etr_buf->offset = rrp - etr_buf->hwaddr;
625 if (etr_buf->full)
626 etr_buf->len = etr_buf->size;
627 else
628 etr_buf->len = rwp - rrp;
631 static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
632 u64 offset, size_t len, char **bufpp)
634 struct etr_flat_buf *flat_buf = etr_buf->private;
636 *bufpp = (char *)flat_buf->vaddr + offset;
638 * tmc_etr_buf_get_data already adjusts the length to handle
639 * buffer wrapping around.
644 static const struct etr_buf_operations etr_flat_buf_ops = {
645 .alloc = tmc_etr_alloc_flat_buf,
646 .free = tmc_etr_free_flat_buf,
647 .sync = tmc_etr_sync_flat_buf,
648 .get_data = tmc_etr_get_data_flat_buf,
652 * tmc_etr_alloc_sg_buf: Allocate an SG buffer for @etr_buf. Set up the parameters
655 static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
656 struct etr_buf *etr_buf, int node,
659 struct etr_sg_table *etr_table;
661 etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
662 etr_buf->size, pages);
663 if (IS_ERR(etr_table))
665 etr_buf->hwaddr = etr_table->hwaddr;
666 etr_buf->mode = ETR_MODE_ETR_SG;
667 etr_buf->private = etr_table;
671 static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
673 struct etr_sg_table *etr_table = etr_buf->private;
676 tmc_free_sg_table(etr_table->sg_table);
681 static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
682 size_t len, char **bufpp)
684 struct etr_sg_table *etr_table = etr_buf->private;
686 return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
689 static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
691 long r_offset, w_offset;
692 struct etr_sg_table *etr_table = etr_buf->private;
693 struct tmc_sg_table *table = etr_table->sg_table;
695 /* Convert hw address to offset in the buffer */
696 r_offset = tmc_sg_get_data_page_offset(table, rrp);
699 "Unable to map RRP %llx to offset\n", rrp);
704 w_offset = tmc_sg_get_data_page_offset(table, rwp);
707 "Unable to map RWP %llx to offset\n", rwp);
712 etr_buf->offset = r_offset;
713 if (etr_buf->full)
714 etr_buf->len = etr_buf->size;
715 else
716 etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
717 w_offset - r_offset;
718 tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
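/*
 * Wrap-around example (illustrative, buffer not full): with a 1MB buffer,
 * r_offset == 0xf0000 and w_offset == 0x10000 give
 * etr_buf->len == 0x100000 + 0x10000 - 0xf0000 == 0x20000.
 */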
721 static const struct etr_buf_operations etr_sg_buf_ops = {
722 .alloc = tmc_etr_alloc_sg_buf,
723 .free = tmc_etr_free_sg_buf,
724 .sync = tmc_etr_sync_sg_buf,
725 .get_data = tmc_etr_get_data_sg_buf,
729 * The TMC ETR could be connected to a CATU device, which can provide an
730 * address translation service. This is represented by the Output port of
731 * the TMC (ETR) connected to the input port of the CATU.
733 * Returns: the coresight_device ptr for the CATU device if a CATU is found.
736 struct coresight_device *
737 tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
740 struct coresight_device *tmp, *etr = drvdata->csdev;
742 if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
745 for (i = 0; i < etr->nr_outport; i++) {
746 tmp = etr->conns[i].child_dev;
747 if (tmp && coresight_is_catu_device(tmp))
754 static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
755 struct etr_buf *etr_buf)
757 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
759 if (catu && helper_ops(catu)->enable)
760 return helper_ops(catu)->enable(catu, etr_buf);
764 static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
766 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
768 if (catu && helper_ops(catu)->disable)
769 helper_ops(catu)->disable(catu, drvdata->etr_buf);
772 static const struct etr_buf_operations *etr_buf_ops[] = {
773 [ETR_MODE_FLAT] = &etr_flat_buf_ops,
774 [ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
775 [ETR_MODE_CATU] = &etr_catu_buf_ops,
778 static inline int tmc_etr_mode_alloc_buf(int mode,
779 struct tmc_drvdata *drvdata,
780 struct etr_buf *etr_buf, int node,
787 case ETR_MODE_ETR_SG:
789 if (etr_buf_ops[mode]->alloc)
790 rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
791 node, pages);
792 if (!rc)
793 etr_buf->ops = etr_buf_ops[mode];
801 * tmc_alloc_etr_buf: Allocate a buffer for use by the ETR.
802 * @drvdata : ETR device details.
803 * @size : size of the requested buffer.
804 * @flags : Required properties for the buffer.
805 * @node : Node for memory allocations.
806 * @pages : An optional list of pages.
808 static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
809 ssize_t size, int flags,
810 int node, void **pages)
813 bool has_etr_sg, has_iommu;
814 bool has_sg, has_catu;
815 struct etr_buf *etr_buf;
817 has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
818 has_iommu = iommu_get_domain_for_dev(drvdata->dev);
819 has_catu = !!tmc_etr_get_catu_device(drvdata);
821 has_sg = has_catu || has_etr_sg;
823 etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
825 return ERR_PTR(-ENOMEM);
827 etr_buf->size = size;
830 * If we have to use an existing list of pages, we cannot reliably
831 * use contiguous DMA memory (even if we have an IOMMU). Otherwise,
832 * we use contiguous DMA memory if at least one of the following
833 * conditions is true:
834 * a) The ETR cannot use Scatter-Gather.
835 * b) We have a backing IOMMU.
836 * c) The requested memory size is small (< 1MB).
838 * If that is not possible or fails, fall back to the other available mechanisms.
842 (!has_sg || has_iommu || size < SZ_1M))
843 rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
844 etr_buf, node, pages);
845 if (rc && has_etr_sg)
846 rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
847 etr_buf, node, pages);
849 rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
850 etr_buf, node, pages);
856 dev_dbg(drvdata->dev, "allocated buffer of size %ldKB in mode %d\n",
857 (unsigned long)size >> 10, etr_buf->mode);
861 static void tmc_free_etr_buf(struct etr_buf *etr_buf)
863 WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
864 etr_buf->ops->free(etr_buf);
869 * tmc_etr_buf_get_data: Get a pointer to the trace data at @offset,
870 * with a maximum of @len bytes.
871 * Returns: The size of the linear data available at @offset, with *bufpp
872 * updated to point to the buffer.
874 static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
875 u64 offset, size_t len, char **bufpp)
877 /* Adjust the length to limit this transaction to the end of the buffer */
878 len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;
880 return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
884 tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
889 len = tmc_etr_buf_get_data(etr_buf, offset,
890 CORESIGHT_BARRIER_PKT_SIZE, &bufp);
891 if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
893 coresight_insert_barrier_packet(bufp);
894 return offset + CORESIGHT_BARRIER_PKT_SIZE;
898 * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
899 * Makes sure the trace data is synced to the memory for consumption.
900 * @etr_buf->offset will hold the offset to the beginning of the trace data
901 * within the buffer, with @etr_buf->len bytes to consume.
903 static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
905 struct etr_buf *etr_buf = drvdata->etr_buf;
909 rrp = tmc_read_rrp(drvdata);
910 rwp = tmc_read_rwp(drvdata);
911 status = readl_relaxed(drvdata->base + TMC_STS);
912 etr_buf->full = status & TMC_STS_FULL;
914 WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
916 etr_buf->ops->sync(etr_buf, rrp, rwp);
918 /* Insert barrier packets at the beginning, if there was an overflow */
920 tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
923 static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
926 struct etr_buf *etr_buf = drvdata->etr_buf;
928 CS_UNLOCK(drvdata->base);
930 /* Wait for TMCSReady bit to be set */
931 tmc_wait_for_tmcready(drvdata);
933 writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
934 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
936 axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
937 axictl &= ~TMC_AXICTL_CLEAR_MASK;
938 axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
939 axictl |= TMC_AXICTL_AXCACHE_OS;
941 if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
942 axictl &= ~TMC_AXICTL_ARCACHE_MASK;
943 axictl |= TMC_AXICTL_ARCACHE_OS;
946 if (etr_buf->mode == ETR_MODE_ETR_SG)
947 axictl |= TMC_AXICTL_SCT_GAT_MODE;
949 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
950 tmc_write_dba(drvdata, etr_buf->hwaddr);
952 * If the TMC pointers must be programmed before the session,
953 * we have to set them properly (i.e., RRP/RWP to the base address and
954 * STS to "not full").
956 if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
957 tmc_write_rrp(drvdata, etr_buf->hwaddr);
958 tmc_write_rwp(drvdata, etr_buf->hwaddr);
959 sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
960 writel_relaxed(sts, drvdata->base + TMC_STS);
963 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
964 TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
965 TMC_FFCR_TRIGON_TRIGIN,
966 drvdata->base + TMC_FFCR);
967 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
968 tmc_enable_hw(drvdata);
970 CS_LOCK(drvdata->base);
973 static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
974 struct etr_buf *etr_buf)
978 /* Callers should provide an appropriate buffer for use */
979 if (WARN_ON(!etr_buf))
982 if ((etr_buf->mode == ETR_MODE_ETR_SG) &&
983 WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
986 if (WARN_ON(drvdata->etr_buf))
990 * If this ETR is connected to a CATU, enable it before we turn the ETR on.
993 rc = tmc_etr_enable_catu(drvdata, etr_buf);
996 rc = coresight_claim_device(drvdata->base);
998 drvdata->etr_buf = etr_buf;
999 __tmc_etr_enable_hw(drvdata);
1006 * Return the available trace data in the buffer (starts at etr_buf->offset,
1007 * limited by etr_buf->len) from @pos, with a maximum limit of @len,
1008 * also updating the @bufpp on where to find it. Since the trace data
1009 * can start anywhere in the buffer, depending on the RRP, we adjust the
1010 * @len returned to handle buffer wrapping around.
1012 * We are protected here by drvdata->reading != 0, which ensures the
1013 * sysfs_buf stays alive.
1015 ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
1016 loff_t pos, size_t len, char **bufpp)
1019 ssize_t actual = len;
1020 struct etr_buf *etr_buf = drvdata->sysfs_buf;
1022 if (pos + actual > etr_buf->len)
1023 actual = etr_buf->len - pos;
1027 /* Compute the offset from which we read the data */
1028 offset = etr_buf->offset + pos;
1029 if (offset >= etr_buf->size)
1030 offset -= etr_buf->size;
1031 return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
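/*
 * Hypothetical usage sketch (the real caller is the TMC sysfs read path):
 * a reader repeatedly asks for the next linear chunk and advances @pos by
 * however much was returned, stopping when nothing is left.
 *
 *	char *buf;
 *	loff_t pos = 0;
 *	ssize_t len;
 *
 *	while ((len = tmc_etr_get_sysfs_trace(drvdata, pos, PAGE_SIZE, &buf)) > 0) {
 *		// hand 'len' bytes at 'buf' to the user
 *		pos += len;
 *	}
 */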
1034 static struct etr_buf *
1035 tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
1037 return tmc_alloc_etr_buf(drvdata, drvdata->size,
1038 0, cpu_to_node(0), NULL);
1042 tmc_etr_free_sysfs_buf(struct etr_buf *buf)
1045 tmc_free_etr_buf(buf);
1048 static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
1050 struct etr_buf *etr_buf = drvdata->etr_buf;
1052 if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
1053 tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
1054 drvdata->sysfs_buf = NULL;
1056 tmc_sync_etr_buf(drvdata);
1060 static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
1062 CS_UNLOCK(drvdata->base);
1064 tmc_flush_and_stop(drvdata);
1066 * When operating in sysFS mode the content of the buffer needs to be
1067 * read before the TMC is disabled.
1069 if (drvdata->mode == CS_MODE_SYSFS)
1070 tmc_etr_sync_sysfs_buf(drvdata);
1072 tmc_disable_hw(drvdata);
1074 CS_LOCK(drvdata->base);
1078 static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
1080 __tmc_etr_disable_hw(drvdata);
1081 /* Disable CATU device if this ETR is connected to one */
1082 tmc_etr_disable_catu(drvdata);
1083 coresight_disclaim_device(drvdata->base);
1084 /* Reset the ETR buf used by hardware */
1085 drvdata->etr_buf = NULL;
1088 static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
1091 unsigned long flags;
1092 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1093 struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;
1096 * If we are enabling the ETR from disabled state, we need to make
1097 * sure we have a buffer with the right size. The etr_buf is not reset
1098 * immediately after we stop the tracing in SYSFS mode as we wait for
1099 * the user to collect the data. We may be able to reuse the existing
1100 * buffer, provided the size matches. Any allocation has to be done
1101 * with the lock released.
1103 spin_lock_irqsave(&drvdata->spinlock, flags);
1104 sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
1105 if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
1106 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1108 /* Allocate memory with the locks released */
1109 free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
1110 if (IS_ERR(new_buf))
1111 return PTR_ERR(new_buf);
1113 /* Let's try again */
1114 spin_lock_irqsave(&drvdata->spinlock, flags);
1117 if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
1123 * In sysFS mode we can have multiple writers per sink. Since this
1124 * sink is already enabled no memory is needed and the HW need not be
1125 * touched, even if the buffer size has changed.
1127 if (drvdata->mode == CS_MODE_SYSFS)
1131 * If we don't have a buffer or it doesn't match the requested size,
1132 * use the buffer allocated above. Otherwise reuse the existing buffer.
1134 sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
1135 if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
1136 free_buf = sysfs_buf;
1137 drvdata->sysfs_buf = new_buf;
1140 ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
1142 drvdata->mode = CS_MODE_SYSFS;
1144 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1146 /* Free memory outside the spinlock if need be */
1148 tmc_etr_free_sysfs_buf(free_buf);
1151 dev_dbg(drvdata->dev, "TMC-ETR enabled\n");
1157 * tmc_etr_setup_perf_buf: Allocate ETR buffer for use by perf.
1158 * The size of the hardware buffer is dependent on the size configured
1159 * via sysfs and the perf ring buffer size. We prefer to allocate the
1160 * largest possible size, scaling down the size by half until it
1161 * reaches a minimum limit (1MB), beyond which we give up.
1163 static struct etr_perf_buffer *
1164 tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, int node, int nr_pages,
1165 void **pages, bool snapshot)
1167 struct etr_buf *etr_buf;
1168 struct etr_perf_buffer *etr_perf;
1171 etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
1173 return ERR_PTR(-ENOMEM);
1176 * Try to match the perf ring buffer size if it is larger
1177 * than the size requested via sysfs.
1179 if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
1180 etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
1182 if (!IS_ERR(etr_buf))
1187 * Else switch to configured size for this ETR
1188 * and scale down until we hit the minimum limit.
1190 size = drvdata->size;
1192 etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL);
1193 if (!IS_ERR(etr_buf))
1196 } while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);
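/*
 * Illustrative example: with drvdata->size == 4MB and every allocation
 * failing, the loop (which halves the size each iteration, per the comment
 * above) tries 4MB, 2MB and 1MB before giving up, since the next halving
 * would fall below TMC_ETR_PERF_MIN_BUF_SIZE.
 */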
1199 return ERR_PTR(-ENOMEM);
1202 etr_perf->etr_buf = etr_buf;
1207 static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
1208 int cpu, void **pages, int nr_pages,
1211 struct etr_perf_buffer *etr_perf;
1212 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1215 cpu = smp_processor_id();
1217 etr_perf = tmc_etr_setup_perf_buf(drvdata, cpu_to_node(cpu),
1218 nr_pages, pages, snapshot);
1219 if (IS_ERR(etr_perf)) {
1220 dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n");
1224 etr_perf->snapshot = snapshot;
1225 etr_perf->nr_pages = nr_pages;
1226 etr_perf->pages = pages;
1231 static void tmc_free_etr_buffer(void *config)
1233 struct etr_perf_buffer *etr_perf = config;
1235 if (etr_perf->etr_buf)
1236 tmc_free_etr_buf(etr_perf->etr_buf);
1241 * tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware
1242 * buffer to the perf ring buffer.
1244 static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
1246 long bytes, to_copy;
1247 long pg_idx, pg_offset, src_offset;
1248 unsigned long head = etr_perf->head;
1249 char **dst_pages, *src_buf;
1250 struct etr_buf *etr_buf = etr_perf->etr_buf;
1252 head = etr_perf->head;
1253 pg_idx = head >> PAGE_SHIFT;
1254 pg_offset = head & (PAGE_SIZE - 1);
1255 dst_pages = (char **)etr_perf->pages;
1256 src_offset = etr_buf->offset;
1257 to_copy = etr_buf->len;
1259 while (to_copy > 0) {
1261 * In one iteration, we can copy the minimum of:
1262 * 1) what is available in the source buffer,
1263 * 2) what is available in the source buffer before it
1264 * wraps around,
1265 * 3) what is available in the destination page.
1268 bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy,
1270 if (WARN_ON_ONCE(bytes <= 0))
1272 bytes = min(bytes, (long)(PAGE_SIZE - pg_offset));
1274 memcpy(dst_pages[pg_idx] + pg_offset, src_buf, bytes);
1278 /* Move destination pointers */
1280 if (pg_offset == PAGE_SIZE) {
1282 if (++pg_idx == etr_perf->nr_pages)
1286 /* Move source pointers */
1287 src_offset += bytes;
1288 if (src_offset >= etr_buf->size)
1289 src_offset -= etr_buf->size;
1294 * tmc_update_etr_buffer : Update the perf ring buffer with the
1295 * available trace data. We use software double buffering at the moment.
1297 * TODO: Add support for reusing the perf ring buffer.
1299 static unsigned long
1300 tmc_update_etr_buffer(struct coresight_device *csdev,
1301 struct perf_output_handle *handle,
1305 unsigned long flags, size = 0;
1306 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1307 struct etr_perf_buffer *etr_perf = config;
1308 struct etr_buf *etr_buf = etr_perf->etr_buf;
1310 spin_lock_irqsave(&drvdata->spinlock, flags);
1311 if (WARN_ON(drvdata->perf_data != etr_perf)) {
1313 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1317 CS_UNLOCK(drvdata->base);
1319 tmc_flush_and_stop(drvdata);
1320 tmc_sync_etr_buf(drvdata);
1322 CS_LOCK(drvdata->base);
1323 /* Reset perf specific data */
1324 drvdata->perf_data = NULL;
1325 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1327 size = etr_buf->len;
1328 tmc_etr_sync_perf_buffer(etr_perf);
1331 * Update handle->head in snapshot mode. Also update the size to the
1332 * hardware buffer size if there was an overflow.
1334 if (etr_perf->snapshot) {
1335 handle->head += size;
1337 size = etr_buf->size;
1340 lost |= etr_buf->full;
1343 perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
1347 static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
1350 unsigned long flags;
1351 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1352 struct perf_output_handle *handle = data;
1353 struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
1355 spin_lock_irqsave(&drvdata->spinlock, flags);
1357 * There can be only one writer per sink in perf mode. If the sink
1358 * is already open in SYSFS mode, we can't use it.
1360 if (drvdata->mode != CS_MODE_DISABLED || WARN_ON(drvdata->perf_data)) {
1365 if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) {
1370 etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
1371 drvdata->perf_data = etr_perf;
1372 rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
1374 drvdata->mode = CS_MODE_PERF;
1377 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1381 static int tmc_enable_etr_sink(struct coresight_device *csdev,
1382 u32 mode, void *data)
1386 return tmc_enable_etr_sink_sysfs(csdev);
1388 return tmc_enable_etr_sink_perf(csdev, data);
1391 /* We shouldn't be here */
1395 static void tmc_disable_etr_sink(struct coresight_device *csdev)
1397 unsigned long flags;
1398 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1400 spin_lock_irqsave(&drvdata->spinlock, flags);
1401 if (drvdata->reading) {
1402 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1406 /* Disable the TMC only if it needs to be disabled */
1407 if (drvdata->mode != CS_MODE_DISABLED) {
1408 tmc_etr_disable_hw(drvdata);
1409 drvdata->mode = CS_MODE_DISABLED;
1412 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1414 dev_dbg(drvdata->dev, "TMC-ETR disabled\n");
1417 static const struct coresight_ops_sink tmc_etr_sink_ops = {
1418 .enable = tmc_enable_etr_sink,
1419 .disable = tmc_disable_etr_sink,
1420 .alloc_buffer = tmc_alloc_etr_buffer,
1421 .update_buffer = tmc_update_etr_buffer,
1422 .free_buffer = tmc_free_etr_buffer,
1425 const struct coresight_ops tmc_etr_cs_ops = {
1426 .sink_ops = &tmc_etr_sink_ops,
1429 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
1432 unsigned long flags;
1434 /* Config types are set at boot time and never change */
1435 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
1438 spin_lock_irqsave(&drvdata->spinlock, flags);
1439 if (drvdata->reading) {
1445 * We can safely allow reads even if the ETR is operating in PERF mode,
1446 * since the sysfs session is captured in mode specific data.
1447 * If drvdata::sysfs_buf is NULL the trace data has been read already.
1449 if (!drvdata->sysfs_buf) {
1454 /* Disable the TMC if we are trying to read from a running session. */
1455 if (drvdata->mode == CS_MODE_SYSFS)
1456 __tmc_etr_disable_hw(drvdata);
1458 drvdata->reading = true;
1460 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1465 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
1467 unsigned long flags;
1468 struct etr_buf *sysfs_buf = NULL;
1470 /* Config types are set at boot time and never change */
1471 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
1474 spin_lock_irqsave(&drvdata->spinlock, flags);
1476 /* Re-enable the TMC if need be */
1477 if (drvdata->mode == CS_MODE_SYSFS) {
1479 * The trace run will continue with the same allocated trace
1480 * buffer. Since the tracer is still enabled, drvdata::sysfs_buf can't be freed.
1483 __tmc_etr_enable_hw(drvdata);
1486 * The ETR is not tracing and the buffer was just read.
1487 * As such prepare to free the trace buffer.
1489 sysfs_buf = drvdata->sysfs_buf;
1490 drvdata->sysfs_buf = NULL;
1493 drvdata->reading = false;
1494 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1496 /* Free allocated memory outside of the spinlock */
1498 tmc_etr_free_sysfs_buf(sysfs_buf);