// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Embedded Trace Buffer driver
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/circ_buf.h>
#include <linux/mm.h>
#include <linux/perf_event.h>

#include "coresight-priv.h"
#include "coresight-etm-perf.h"

#define ETB_RAM_DEPTH_REG       0x004
#define ETB_STATUS_REG          0x00c
#define ETB_RAM_READ_DATA_REG   0x010
#define ETB_RAM_READ_POINTER    0x014
#define ETB_RAM_WRITE_POINTER   0x018
#define ETB_TRG                 0x01c
#define ETB_CTL_REG             0x020
#define ETB_RWD_REG             0x024
#define ETB_FFSR                0x300
#define ETB_FFCR                0x304
#define ETB_ITMISCOP0           0xee0
#define ETB_ITTRFLINACK         0xee4
#define ETB_ITTRFLIN            0xee8
#define ETB_ITATBDATA0          0xeec
#define ETB_ITATBCTR2           0xef0
#define ETB_ITATBCTR1           0xef4
#define ETB_ITATBCTR0           0xef8

/* register description */
/* STS - 0x00C */
#define ETB_STATUS_RAM_FULL     BIT(0)
/* CTL - 0x020 */
#define ETB_CTL_CAPT_EN         BIT(0)
/* FFCR - 0x304 */
#define ETB_FFCR_EN_FTC         BIT(0)
#define ETB_FFCR_FON_MAN        BIT(6)
#define ETB_FFCR_STOP_FI        BIT(12)
#define ETB_FFCR_STOP_TRIGGER   BIT(13)

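/*
 * ETB_FFCR_BIT and ETB_FFSR_BIT are the bit positions polled via
 * coresight_timeout(): FFCR bit 6 (FOnMan) clears once a manual flush has
 * completed, and FFSR bit 1 is set once the formatter has stopped.
 * A formatter frame is ETB_FRAME_SIZE_WORDS 32-bit words, i.e. 16 bytes.
 */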
#define ETB_FFCR_BIT            6
#define ETB_FFSR_BIT            1
#define ETB_FRAME_SIZE_WORDS    4

/**
 * struct etb_drvdata - specifics associated with an ETB component
 * @base:       memory mapped base address for this component.
 * @dev:        the device entity associated with this component.
 * @atclk:      optional clock for the core parts of the ETB.
 * @csdev:      component vitals needed by the framework.
 * @miscdev:    specifics to handle "/dev/xyz.etb" entry.
 * @spinlock:   serialise accesses to this ETB; only one user at a time.
 * @reading:    synchronise user space access to etb buffer.
 * @buf:        area of memory where ETB buffer content gets sent.
 * @mode:       how this ETB is being used, perf mode or sysfs mode.
 * @buffer_depth: size of @buf, in 32-bit words.
 * @trigger_cntr: amount of words to store after a trigger.
 */
struct etb_drvdata {
        void __iomem            *base;
        struct device           *dev;
        struct clk              *atclk;
        struct coresight_device *csdev;
        struct miscdevice       miscdev;
        spinlock_t              spinlock;
        local_t                 reading;
        u8                      *buf;
        u32                     mode;
        u32                     buffer_depth;
        u32                     trigger_cntr;
};

static int etb_set_buffer(struct coresight_device *csdev,
                          struct perf_output_handle *handle);

static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
        u32 depth = 0;

        pm_runtime_get_sync(drvdata->dev);

        /* RO registers don't need locking */
        depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);

        pm_runtime_put(drvdata->dev);
        return depth;
}

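/*
 * Program the ETB for a new capture session: zero the trace RAM through
 * the RAM write data register, reset the read/write pointers, set up the
 * trigger counter and the formatter (formatting enabled, stop on trigger),
 * then set the capture enable bit in the control register.
 */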
static void __etb_enable_hw(struct etb_drvdata *drvdata)
{
        int i;
        u32 depth;

        CS_UNLOCK(drvdata->base);

        depth = drvdata->buffer_depth;
        /* reset write RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
        /* clear entire RAM buffer */
        for (i = 0; i < depth; i++)
                writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);

        /* reset write RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
        /* reset read RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);

        writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
        writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
                       drvdata->base + ETB_FFCR);
        /* ETB trace capture enable */
        writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);

        CS_LOCK(drvdata->base);
}

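/*
 * Claim the device before touching it: the CoreSight CLAIM tags are used
 * to coordinate ownership of the component between self-hosted software
 * and an external debugger, so the ETB is only programmed if the claim
 * succeeds.
 */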
static int etb_enable_hw(struct etb_drvdata *drvdata)
{
        int rc = coresight_claim_device(drvdata->base);

        if (rc)
                return rc;

        __etb_enable_hw(drvdata);
        return 0;
}

static int etb_enable_sysfs(struct coresight_device *csdev)
{
        int ret = 0;
        unsigned long flags;
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Don't mess with perf sessions. */
        if (drvdata->mode == CS_MODE_PERF) {
                ret = -EBUSY;
                goto out;
        }

        /* Nothing to do, the tracer is already enabled. */
        if (drvdata->mode == CS_MODE_SYSFS)
                goto out;

        ret = etb_enable_hw(drvdata);
        if (!ret)
                drvdata->mode = CS_MODE_SYSFS;

out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return ret;
}

static int etb_enable_perf(struct coresight_device *csdev, void *data)
{
        int ret = 0;
        unsigned long flags;
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* No need to continue if the component is already in use. */
        if (drvdata->mode != CS_MODE_DISABLED) {
                ret = -EBUSY;
                goto out;
        }

        /*
         * We don't have any internal state to clean up if setting up the
         * perf buffer fails, so this step can be done before the ETB is
         * turned on; if it fails we simply bail out.
         */
        ret = etb_set_buffer(csdev, (struct perf_output_handle *)data);
        if (ret)
                goto out;

        ret = etb_enable_hw(drvdata);
        if (!ret)
                drvdata->mode = CS_MODE_PERF;

out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return ret;
}

static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
{
        int ret;
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        switch (mode) {
        case CS_MODE_SYSFS:
                ret = etb_enable_sysfs(csdev);
                break;
        case CS_MODE_PERF:
                ret = etb_enable_perf(csdev, data);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        dev_dbg(drvdata->dev, "ETB enabled\n");
        return 0;
}

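/*
 * Stop trace capture: ask the formatter to stop on flush completion,
 * trigger a manual flush and wait for it to drain, then clear the capture
 * enable bit and wait for the formatter to report that it has stopped.
 */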
static void __etb_disable_hw(struct etb_drvdata *drvdata)
{
        u32 ffcr;

        CS_UNLOCK(drvdata->base);

        ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
        /* stop formatter when a stop has completed */
        ffcr |= ETB_FFCR_STOP_FI;
        writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
        /* manually generate a flush of the system */
        ffcr |= ETB_FFCR_FON_MAN;
        writel_relaxed(ffcr, drvdata->base + ETB_FFCR);

        if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
                dev_err(drvdata->dev,
                "timeout while waiting for completion of Manual Flush\n");
        }

        /* disable trace capture */
        writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);

        if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
                dev_err(drvdata->dev,
                        "timeout while waiting for Formatter to Stop\n");
        }

        CS_LOCK(drvdata->base);
}

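/*
 * Copy the contents of the trace RAM into drvdata->buf: position the read
 * pointer (at 0, or at the write pointer if the RAM has wrapped), read the
 * buffer one word at a time through the read data register, insert a
 * barrier packet if trace was lost, and zero-fill any partial last frame.
 */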
static void etb_dump_hw(struct etb_drvdata *drvdata)
{
        bool lost = false;
        int i;
        u8 *buf_ptr;
        u32 read_data, depth;
        u32 read_ptr, write_ptr;
        u32 frame_off, frame_endoff;

        CS_UNLOCK(drvdata->base);

        read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
        write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

        frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
        frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
        if (frame_off) {
                dev_err(drvdata->dev,
                        "write_ptr: %lu not aligned to formatter frame size\n",
                        (unsigned long)write_ptr);
                dev_err(drvdata->dev, "frameoff: %lu, frame_endoff: %lu\n",
                        (unsigned long)frame_off, (unsigned long)frame_endoff);
                write_ptr += frame_endoff;
        }

        if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
                      & ETB_STATUS_RAM_FULL) == 0) {
                writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
        } else {
                writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
                lost = true;
        }

        depth = drvdata->buffer_depth;
        buf_ptr = drvdata->buf;
        for (i = 0; i < depth; i++) {
                read_data = readl_relaxed(drvdata->base +
                                          ETB_RAM_READ_DATA_REG);
                *(u32 *)buf_ptr = read_data;
                buf_ptr += 4;
        }

        if (lost)
                coresight_insert_barrier_packet(drvdata->buf);

        if (frame_off) {
                buf_ptr -= (frame_endoff * 4);
                for (i = 0; i < frame_endoff; i++) {
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                }
        }

        writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

        CS_LOCK(drvdata->base);
}

static void etb_disable_hw(struct etb_drvdata *drvdata)
{
        __etb_disable_hw(drvdata);
        etb_dump_hw(drvdata);
        coresight_disclaim_device(drvdata->base);
}

static void etb_disable(struct coresight_device *csdev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        unsigned long flags;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Disable the ETB only if it needs to be */
        if (drvdata->mode != CS_MODE_DISABLED) {
                etb_disable_hw(drvdata);
                drvdata->mode = CS_MODE_DISABLED;
        }
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_dbg(drvdata->dev, "ETB disabled\n");
}

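/*
 * Allocate the perf sink buffer descriptor (struct cs_buffers) on the
 * memory node of the CPU being traced; the AUX ring buffer pages
 * themselves are provided by perf and only referenced here.
 */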
static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
                              void **pages, int nr_pages, bool overwrite)
{
        int node;
        struct cs_buffers *buf;

        if (cpu == -1)
                cpu = smp_processor_id();
        node = cpu_to_node(cpu);

        buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->snapshot = overwrite;
        buf->nr_pages = nr_pages;
        buf->data_pages = pages;

        return buf;
}

static void etb_free_buffer(void *config)
{
        struct cs_buffers *buf = config;

        kfree(buf);
}

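/*
 * Derive the starting position in the AUX ring buffer from the perf
 * handle: handle->head is wrapped to the buffer size, then split into a
 * page index (buf->cur) and an offset within that page (buf->offset).
 */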
static int etb_set_buffer(struct coresight_device *csdev,
                          struct perf_output_handle *handle)
{
        int ret = 0;
        unsigned long head;
        struct cs_buffers *buf = etm_perf_sink_config(handle);

        if (!buf)
                return -EINVAL;

        /* wrap head around to the amount of space we have */
        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

        /* find the page to write to */
        buf->cur = head / PAGE_SIZE;

        /* and offset within that page */
        buf->offset = head % PAGE_SIZE;

        local_set(&buf->data_size, 0);

        return ret;
}

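/*
 * Drain the ETB into the perf AUX ring buffer: stop capture, work out how
 * much trace the RAM holds (accounting for wrap-around and for the space
 * available in the ring buffer), copy it out word by word, then reset the
 * pointers and re-arm the ETB for the next run.  Returns the number of
 * bytes copied.
 */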
static unsigned long etb_update_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle,
                              void *sink_config)
{
        bool lost = false;
        int i, cur;
        u8 *buf_ptr;
        const u32 *barrier;
        u32 read_ptr, write_ptr, capacity;
        u32 status, read_data;
        unsigned long offset, to_read;
        struct cs_buffers *buf = sink_config;
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (!buf)
                return 0;

        capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;

        __etb_disable_hw(drvdata);
        CS_UNLOCK(drvdata->base);

        /* unit is in words, not bytes */
        read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
        write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

        /*
         * Entries should be aligned to the frame size.  If they are not
         * go back to the last alignment point to give decoding tools a
         * chance to fix things.
         */
        if (write_ptr % ETB_FRAME_SIZE_WORDS) {
                dev_err(drvdata->dev,
                        "write_ptr: %lu not aligned to formatter frame size\n",
                        (unsigned long)write_ptr);

                write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
                lost = true;
        }

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred.  If so adjust things accordingly.  Otherwise
         * start at the beginning and go until the write pointer has
         * been reached.
         */
        status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
        if (status & ETB_STATUS_RAM_FULL) {
                lost = true;
                to_read = capacity;
                read_ptr = write_ptr;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
                to_read *= ETB_FRAME_SIZE_WORDS;
        }

        /*
         * Make sure we don't overwrite data that hasn't been consumed yet.
         * It is entirely possible that the HW buffer has more data than the
         * ring buffer can currently handle.  If so adjust the start address
         * to take only the last traces.
         *
         * In snapshot mode we are looking to get the latest traces only and as
         * such, we don't care about not overwriting data that hasn't been
         * processed by user space.
         */
        if (!buf->snapshot && to_read > handle->size) {
                u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);

                /* The new read pointer must be frame size aligned */
                to_read = handle->size & mask;
                /*
                 * Move the RAM read pointer up, keeping in mind that
                 * everything is in frame size units.
                 */
                read_ptr = (write_ptr + drvdata->buffer_depth) -
                                        to_read / ETB_FRAME_SIZE_WORDS;
                /* Wrap around if need be */
                if (read_ptr > (drvdata->buffer_depth - 1))
                        read_ptr -= drvdata->buffer_depth;
                /* let the decoder know we've skipped ahead */
                lost = true;
        }

        if (lost)
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

        /* finally tell HW where we want to start reading from */
        writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

        cur = buf->cur;
        offset = buf->offset;
        barrier = barrier_pkt;

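        /*
         * Copy loop: read one 32-bit word at a time from the RAM read data
         * register into the AUX pages.  If trace was lost, the first
         * CORESIGHT_BARRIER_PKT_SIZE bytes are replaced with a barrier
         * packet so that decoders can resynchronise after the discontinuity.
         */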
        for (i = 0; i < to_read; i += 4) {
                buf_ptr = buf->data_pages[cur] + offset;
                read_data = readl_relaxed(drvdata->base +
                                          ETB_RAM_READ_DATA_REG);
                if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
                        read_data = *barrier;
                        barrier++;
                }

                *(u32 *)buf_ptr = read_data;
                buf_ptr += 4;

                offset += 4;
                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        cur++;
                        /* wrap around at the end of the buffer */
                        cur &= buf->nr_pages - 1;
                }
        }

        /* reset ETB buffer for next run */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);

        /*
         * In snapshot mode we have to update the handle->head to point
         * to the new location.
         */
        if (buf->snapshot) {
                handle->head = (cur * PAGE_SIZE) + offset;
                to_read = buf->nr_pages << PAGE_SHIFT;
        }
        __etb_enable_hw(drvdata);
        CS_LOCK(drvdata->base);

        return to_read;
}

static const struct coresight_ops_sink etb_sink_ops = {
        .enable         = etb_enable,
        .disable        = etb_disable,
        .alloc_buffer   = etb_alloc_buffer,
        .free_buffer    = etb_free_buffer,
        .update_buffer  = etb_update_buffer,
};

static const struct coresight_ops etb_cs_ops = {
        .sink_ops       = &etb_sink_ops,
};

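/*
 * Used by the character device read path (etb_read): briefly stop capture,
 * snapshot the trace RAM into drvdata->buf, then re-enable the ETB so a
 * running sysfs trace session keeps going.
 */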
static void etb_dump(struct etb_drvdata *drvdata)
{
        unsigned long flags;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->mode == CS_MODE_SYSFS) {
                __etb_disable_hw(drvdata);
                etb_dump_hw(drvdata);
                __etb_enable_hw(drvdata);
        }
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_dbg(drvdata->dev, "ETB dumped\n");
}

static int etb_open(struct inode *inode, struct file *file)
{
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);

        if (local_cmpxchg(&drvdata->reading, 0, 1))
                return -EBUSY;

        dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
        return 0;
}

static ssize_t etb_read(struct file *file, char __user *data,
                                size_t len, loff_t *ppos)
{
        u32 depth;
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);

        etb_dump(drvdata);

        depth = drvdata->buffer_depth;
        if (*ppos + len > depth * 4)
                len = depth * 4 - *ppos;

        if (copy_to_user(data, drvdata->buf + *ppos, len)) {
                dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
                return -EFAULT;
        }

        *ppos += len;

        dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
                __func__, len, (int)(depth * 4 - *ppos));
        return len;
}

static int etb_release(struct inode *inode, struct file *file)
{
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);
        local_set(&drvdata->reading, 0);

        dev_dbg(drvdata->dev, "%s: released\n", __func__);
        return 0;
}

static const struct file_operations etb_fops = {
        .owner          = THIS_MODULE,
        .open           = etb_open,
        .read           = etb_read,
        .release        = etb_release,
        .llseek         = no_llseek,
};

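/*
 * Expose the main management registers under the "mgmt" sysfs group
 * (coresight_etb_mgmt_group below) for debugging.
 */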
#define coresight_etb10_reg(name, offset)               \
        coresight_simple_reg32(struct etb_drvdata, name, offset)

coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
coresight_etb10_reg(sts, ETB_STATUS_REG);
coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
coresight_etb10_reg(trg, ETB_TRG);
coresight_etb10_reg(ctl, ETB_CTL_REG);
coresight_etb10_reg(ffsr, ETB_FFSR);
coresight_etb10_reg(ffcr, ETB_FFCR);

static struct attribute *coresight_etb_mgmt_attrs[] = {
        &dev_attr_rdp.attr,
        &dev_attr_sts.attr,
        &dev_attr_rrp.attr,
        &dev_attr_rwp.attr,
        &dev_attr_trg.attr,
        &dev_attr_ctl.attr,
        &dev_attr_ffsr.attr,
        &dev_attr_ffcr.attr,
        NULL,
};

static ssize_t trigger_cntr_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
        unsigned long val = drvdata->trigger_cntr;

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->trigger_cntr = val;
        return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static struct attribute *coresight_etb_attrs[] = {
        &dev_attr_trigger_cntr.attr,
        NULL,
};

static const struct attribute_group coresight_etb_group = {
        .attrs = coresight_etb_attrs,
};

static const struct attribute_group coresight_etb_mgmt_group = {
        .attrs = coresight_etb_mgmt_attrs,
        .name = "mgmt",
};

const struct attribute_group *coresight_etb_groups[] = {
        &coresight_etb_group,
        &coresight_etb_mgmt_group,
        NULL,
};

static int etb_probe(struct amba_device *adev, const struct amba_id *id)
{
        int ret;
        void __iomem *base;
        struct device *dev = &adev->dev;
        struct coresight_platform_data *pdata = NULL;
        struct etb_drvdata *drvdata;
        struct resource *res = &adev->res;
        struct coresight_desc desc = { 0 };
        struct device_node *np = adev->dev.of_node;

        if (np) {
                pdata = of_get_coresight_platform_data(dev, np);
                if (IS_ERR(pdata))
                        return PTR_ERR(pdata);
                adev->dev.platform_data = pdata;
        }

        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        drvdata->dev = &adev->dev;
        drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
        if (!IS_ERR(drvdata->atclk)) {
                ret = clk_prepare_enable(drvdata->atclk);
                if (ret)
                        return ret;
        }
        dev_set_drvdata(dev, drvdata);

        /* Validity of the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        drvdata->base = base;

        spin_lock_init(&drvdata->spinlock);

        drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
        pm_runtime_put(&adev->dev);

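        /*
         * The RAM depth register reports the buffer size in 32-bit words;
         * treat a value with bit 31 set as implausible and bail out.
         * drvdata->buf then holds one 4-byte entry per word of trace RAM.
         */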
        if (drvdata->buffer_depth & 0x80000000)
                return -EINVAL;

        drvdata->buf = devm_kcalloc(dev,
                                    drvdata->buffer_depth, 4, GFP_KERNEL);
        if (!drvdata->buf)
                return -ENOMEM;

        desc.type = CORESIGHT_DEV_TYPE_SINK;
        desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
        desc.ops = &etb_cs_ops;
        desc.pdata = pdata;
        desc.dev = dev;
        desc.groups = coresight_etb_groups;
        drvdata->csdev = coresight_register(&desc);
        if (IS_ERR(drvdata->csdev))
                return PTR_ERR(drvdata->csdev);

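        /*
         * Register a misc character device so user space can read the
         * captured trace (see etb_fops); unwind the coresight registration
         * if this fails.
         */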
        drvdata->miscdev.name = pdata->name;
        drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
        drvdata->miscdev.fops = &etb_fops;
        ret = misc_register(&drvdata->miscdev);
        if (ret)
                goto err_misc_register;

        return 0;

err_misc_register:
        coresight_unregister(drvdata->csdev);
        return ret;
}

#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR(drvdata->atclk))
                clk_disable_unprepare(drvdata->atclk);

        return 0;
}

static int etb_runtime_resume(struct device *dev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR(drvdata->atclk))
                clk_prepare_enable(drvdata->atclk);

        return 0;
}
#endif

static const struct dev_pm_ops etb_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
};

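/*
 * AMBA peripheral ID table: 0x000bb907 should correspond to ARM's
 * CoreSight ETB (part number 0x907, with ARM as the designer).
 */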
static const struct amba_id etb_ids[] = {
        {
                .id     = 0x000bb907,
                .mask   = 0x000fffff,
        },
        { 0, 0},
};

static struct amba_driver etb_driver = {
        .drv = {
                .name   = "coresight-etb10",
                .owner  = THIS_MODULE,
                .pm     = &etb_dev_pm_ops,
                .suppress_bind_attrs = true,
        },
        .probe          = etb_probe,
        .id_table       = etb_ids,
};
builtin_amba_driver(etb_driver);