]> asedeno.scripts.mit.edu Git - linux.git/blob - arch/x86/events/intel/uncore_snb.c
dbaa1b088a30e6106b680a666540016180c694ad
[linux.git] / arch / x86 / events / intel / uncore_snb.c
1 // SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
3 #include "uncore.h"
4
/*
 * Uncore IMC PCI IDs
 *
 * Host bridge / integrated memory controller PCI device IDs, one per
 * client SKU.  Some IDs are shared across generations: AML_YD (0x590c)
 * is the same device ID as KBL_Y, and WHL_UQ (0x3ed0) matches CFL_4U.
 */
#define PCI_DEVICE_ID_INTEL_SNB_IMC             0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC             0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC          0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC             0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC           0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC             0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC           0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC           0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC          0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC          0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC          0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC          0x191f
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC           0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC           0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC          0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC          0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC          0x591f
#define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC          0x5910
#define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC          0x5918
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC          0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC          0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC          0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC          0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC        0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC        0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC        0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC        0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC        0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC        0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC        0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC        0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC        0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC        0x3e32
#define PCI_DEVICE_ID_INTEL_AML_YD_IMC          0x590c
#define PCI_DEVICE_ID_INTEL_AML_YQ_IMC          0x590d
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC          0x3ed0
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC        0x3e34
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC          0x3e35
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC           0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC          0x8a12
46
47
/* SNB event control (per-counter event select register layout) */
#define SNB_UNC_CTL_EV_SEL_MASK                 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK                  0x0000ff00
#define SNB_UNC_CTL_EDGE_DET                    (1 << 18)
#define SNB_UNC_CTL_EN                          (1 << 22)
#define SNB_UNC_CTL_INVERT                      (1 << 23)
#define SNB_UNC_CTL_CMASK_MASK                  0x1f000000	/* 5-bit cmask on SNB */
#define NHM_UNC_CTL_CMASK_MASK                  0xff000000	/* 8-bit cmask on NHM */
#define NHM_UNC_FIXED_CTR_CTL_EN                (1 << 0)

/* All user-programmable config bits accepted on SNB-style counters */
#define SNB_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 SNB_UNC_CTL_CMASK_MASK)

/* Same as above, but with the wider NHM cmask field */
#define NHM_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
#define SNB_UNC_FIXED_CTR_CTRL                  0x394
#define SNB_UNC_FIXED_CTR                       0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)	/* enable bits for 4 cores */
#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
#define SNB_UNC_CBO_0_PER_CTR0                  0x706
#define SNB_UNC_CBO_MSR_OFFSET                  0x10	/* MSR stride between C-Boxes */

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0                    0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0                 0x3b2
#define SNB_UNC_ARB_MSR_OFFSET                  0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
#define NHM_UNC_FIXED_CTR                       0x394
#define NHM_UNC_FIXED_CTR_CTRL                  0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                     0x3c0
#define NHM_UNC_UNCORE_PMC0                     0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL                 0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 5) - 1)	/* enable bits for 5 cores */

/* ICL Cbo register */
#define ICL_UNC_CBO_CONFIG                      0x396	/* reports the number of C-Boxes */
#define ICL_UNC_NUM_CBO_MASK                    0xf
#define ICL_UNC_CBO_0_PER_CTR0                  0x702
#define ICL_UNC_CBO_MSR_OFFSET                  0x8

/* sysfs "format" attributes; cmask5 is the SNB width, cmask8 the NHM width */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
118
119 /* Sandy Bridge uncore support */
120 static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
121 {
122         struct hw_perf_event *hwc = &event->hw;
123
124         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
125                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
126         else
127                 wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
128 }
129
/* Disable a counter by clearing its event select register. */
static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}
134
135 static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
136 {
137         if (box->pmu->pmu_idx == 0) {
138                 wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
139                         SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
140         }
141 }
142
/* Globally (re-)enable uncore counting for all cores. */
static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}
148
/* Box teardown: clear the shared global control, first PMU instance only. */
static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}
154
/* Named events exported via sysfs for the SNB C-Box PMU. */
static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};
159
/* sysfs format attributes for SNB-style counters (5-bit cmask). */
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};
168
/* "format" attribute group shared by the SNB cbox/arb PMU types. */
static const struct attribute_group snb_uncore_format_group = {
	.name           = "format",
	.attrs          = snb_uncore_formats_attr,
};
173
/* MSR-based counter access callbacks for the SNB uncore PMUs. */
static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box       = snb_uncore_msr_init_box,
	.enable_box     = snb_uncore_msr_enable_box,
	.exit_box       = snb_uncore_msr_exit_box,
	.disable_event  = snb_uncore_msr_disable_event,
	.enable_event   = snb_uncore_msr_enable_event,
	.read_counter   = uncore_msr_read_counter,
};
182
/* ARB events 0x80 and 0x83 are restricted to counter 0. */
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
188
/*
 * SNB C-Box PMU type: up to 4 boxes (clamped to the core count in
 * snb_uncore_cpu_init()), two 44-bit general counters per box plus a
 * single shared 48-bit fixed counter.
 */
static struct intel_uncore_type snb_uncore_cbox = {
	.name           = "cbox",
	.num_counters   = 2,
	.num_boxes      = 4,
	.perf_ctr_bits  = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr      = SNB_UNC_FIXED_CTR,
	.fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed   = 1,
	.event_mask     = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
	.ops            = &snb_uncore_msr_ops,
	.format_group   = &snb_uncore_format_group,
	.event_descs    = snb_uncore_events,
};
206
/* SNB ARB PMU type: one box, two 44-bit counters, constrained events. */
static struct intel_uncore_type snb_uncore_arb = {
	.name           = "arb",
	.num_counters   = 2,
	.num_boxes      = 1,
	.perf_ctr_bits  = 44,
	.perf_ctr       = SNB_UNC_ARB_PER_CTR0,
	.event_ctl      = SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask     = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset     = SNB_UNC_ARB_MSR_OFFSET,
	.constraints    = snb_uncore_arb_constraints,
	.ops            = &snb_uncore_msr_ops,
	.format_group   = &snb_uncore_format_group,
};
220
/* NULL-terminated list of MSR-based uncore types registered on SNB. */
static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
226
227 void snb_uncore_cpu_init(void)
228 {
229         uncore_msr_uncores = snb_msr_uncores;
230         if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
231                 snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
232 }
233
234 static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
235 {
236         if (box->pmu->pmu_idx == 0) {
237                 wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
238                         SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
239         }
240
241         /* The 8th CBOX has different MSR space */
242         if (box->pmu->pmu_idx == 7)
243                 __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
244 }
245
/* Globally (re-)enable uncore counting for all cores (SKL layout). */
static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}
251
/* Box teardown: clear the shared global control, first PMU instance only. */
static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}
257
/* SKL ops: own box init/enable/exit, SNB per-event and read callbacks. */
static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box       = skl_uncore_msr_init_box,
	.enable_box     = skl_uncore_msr_enable_box,
	.exit_box       = skl_uncore_msr_exit_box,
	.disable_event  = snb_uncore_msr_disable_event,
	.enable_event   = snb_uncore_msr_enable_event,
	.read_counter   = uncore_msr_read_counter,
};
266
/*
 * SKL C-Box PMU type: up to 8 boxes (clamped to the core count in
 * skl_uncore_cpu_init()), four counters per box, SNB register layout.
 */
static struct intel_uncore_type skl_uncore_cbox = {
	.name           = "cbox",
	.num_counters   = 4,
	.num_boxes      = 8,
	.perf_ctr_bits  = 44,
	.fixed_ctr_bits = 48,
	.perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr      = SNB_UNC_FIXED_CTR,
	.fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed   = 1,
	.event_mask     = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
	.ops            = &skl_uncore_msr_ops,
	.format_group   = &snb_uncore_format_group,
	.event_descs    = snb_uncore_events,
};
284
/* NULL-terminated list of MSR-based uncore types registered on SKL. */
static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
290
291 void skl_uncore_cpu_init(void)
292 {
293         uncore_msr_uncores = skl_msr_uncores;
294         if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
295                 skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
296         snb_uncore_arb.ops = &skl_uncore_msr_ops;
297 }
298
/*
 * ICL C-Box PMU type; num_boxes is left unset here and filled in at
 * runtime from the ICL_UNC_CBO_CONFIG MSR (see icl_uncore_cpu_init()).
 */
static struct intel_uncore_type icl_uncore_cbox = {
	.name           = "cbox",
	.num_counters   = 4,
	.perf_ctr_bits  = 44,
	.perf_ctr       = ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask     = SNB_UNC_RAW_EVENT_MASK,
	.msr_offset     = ICL_UNC_CBO_MSR_OFFSET,
	.ops            = &skl_uncore_msr_ops,
	.format_group   = &snb_uncore_format_group,
};
310
/* Named events exported via sysfs for the ICL clock PMU. */
static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};
315
/* The clock PMU only exposes the "event" config field. */
static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};
320
/* "format" attribute group for the ICL clock PMU. */
static struct attribute_group icl_uncore_clock_format_group = {
	.name = "format",
	.attrs = icl_uncore_clock_formats_attr,
};
325
/* ICL clock PMU: a single box exposing only the 48-bit fixed counter. */
static struct intel_uncore_type icl_uncore_clockbox = {
	.name           = "clock",
	.num_counters   = 1,
	.num_boxes      = 1,
	.fixed_ctr_bits = 48,
	.fixed_ctr      = SNB_UNC_FIXED_CTR,
	.fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed   = 1,
	.event_mask     = SNB_UNC_CTL_EV_SEL_MASK,
	.format_group   = &icl_uncore_clock_format_group,
	.ops            = &skl_uncore_msr_ops,
	.event_descs    = icl_uncore_events,
};
339
/* NULL-terminated list of MSR-based uncore types registered on ICL. */
static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};
346
347 static int icl_get_cbox_num(void)
348 {
349         u64 num_boxes;
350
351         rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);
352
353         return num_boxes & ICL_UNC_NUM_CBO_MASK;
354 }
355
356 void icl_uncore_cpu_init(void)
357 {
358         uncore_msr_uncores = icl_msr_uncores;
359         icl_uncore_cbox.num_boxes = icl_get_cbox_num();
360         snb_uncore_arb.ops = &skl_uncore_msr_ops;
361 }
362
/* Index into snb_pci_uncores[] (encoded into PCI table driver_data). */
enum {
	SNB_PCI_UNCORE_IMC,
};
366
/*
 * IMC free-running events: read/write counts in 64-byte cache lines;
 * scale 6.103515625e-5 converts lines to MiB (64 / 2^20).
 */
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};
378
/* IMC event encoding and MMIO layout */
#define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48	/* BAR in PCI config space */

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000

/* old-style event codes and the MMIO offsets of the matching counters */
#define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE

enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA         = 0,
	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

/* base 0x5050, 0x4 counter stride, no box offset, 2 counters, 32 bit */
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA]     = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
};
399
/* The IMC PMU only exposes the "event" config field. */
static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};
404
/* "format" attribute group for the SNB IMC PMU. */
static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};
409
/*
 * Map the IMC counter MMIO region: read the (possibly 64-bit) BAR from
 * PCI config space, page-align it and ioremap() the counter window.
 */
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	/* upper 32 bits of the BAR live in the next config dword */
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	/* strip the BAR's low flag bits / page-align the base */
	addr &= ~(PAGE_SIZE - 1);

	/* NOTE(review): ioremap() failure is not checked here; io_addr may
	 * be NULL — presumably callers/readers tolerate that, confirm. */
	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
430
/*
 * The IMC counters are free-running: there is nothing to enable or
 * disable, so these callbacks are intentionally empty.
 */
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
442
/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 *
 * Validates the event, pins it to the box's CPU, maps the legacy event
 * code (0x1 reads / 0x2 writes) to the matching free-running counter
 * MMIO base, and rewrites hw.config into the standard free-running
 * encoding.  Returns 0 on success, -ENOENT/-EINVAL on rejection.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	/* NOTE(review): appears redundant with the hwc->sample_period
	 * check above — both reject sampling events */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	/* pin the event to the CPU managing this box */
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.idx = idx;

	/* Convert to standard encoding format for freerunning counters */
	event->hw.config = ((cfg - 1) << 8) | 0x10ff;

	/* no group validation needed, we have free running counters */

	return 0;
}
522
/* No per-event hardware configuration needed; always succeeds. */
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}
527
528 int snb_pci2phy_map_init(int devid)
529 {
530         struct pci_dev *dev = NULL;
531         struct pci2phy_map *map;
532         int bus, segment;
533
534         dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
535         if (!dev)
536                 return -ENOTTY;
537
538         bus = dev->bus->number;
539         segment = pci_domain_nr(dev->bus);
540
541         raw_spin_lock(&pci2phy_map_lock);
542         map = __find_pci2phy_map(segment);
543         if (!map) {
544                 raw_spin_unlock(&pci2phy_map_lock);
545                 pci_dev_put(dev);
546                 return -ENOMEM;
547         }
548         map->pbus_to_physid[bus] = 0;
549         raw_spin_unlock(&pci2phy_map_lock);
550
551         pci_dev_put(dev);
552
553         return 0;
554 }
555
/* Custom perf PMU for the IMC, wired to the generic uncore callbacks
 * except for event_init (legacy event-code compatibility). */
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr    = perf_invalid_context,
	.event_init     = snb_uncore_imc_event_init,
	.add            = uncore_pmu_event_add,
	.del            = uncore_pmu_event_del,
	.start          = uncore_pmu_event_start,
	.stop           = uncore_pmu_event_stop,
	.read           = uncore_pmu_event_read,
	.capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
};
566
/* MMIO-based counter access callbacks for the IMC PMU. */
static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box       = snb_uncore_imc_init_box,
	.exit_box       = uncore_mmio_exit_box,
	.enable_box     = snb_uncore_imc_enable_box,
	.disable_box    = snb_uncore_imc_disable_box,
	.disable_event  = snb_uncore_imc_disable_event,
	.enable_event   = snb_uncore_imc_enable_event,
	.hw_config      = snb_uncore_imc_hw_config,
	.read_counter   = uncore_mmio_read_counter,
};
577
/* IMC PMU type: one box, two free-running counters, custom pmu ops. */
static struct intel_uncore_type snb_uncore_imc = {
	.name           = "imc",
	.num_counters   = 2,
	.num_boxes      = 1,
	.num_freerunning_types  = SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.freerunning    = snb_uncore_imc_freerunning,
	.event_descs    = snb_uncore_imc_events,
	.format_group   = &snb_uncore_imc_format_group,
	.ops            = &snb_uncore_imc_ops,
	.pmu            = &snb_uncore_imc_pmu,
};
589
/* PCI-based uncore types, indexed by the SNB_PCI_UNCORE_* enum. */
static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
	NULL,
};
594
/* Sandy Bridge IMC PCI match table. */
static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
602
/* Ivy Bridge IMC PCI match table (mobile and Xeon E3 variants). */
static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
614
/* Haswell IMC PCI match table (desktop and ULT variants). */
static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
626
/* Broadwell IMC PCI match table. */
static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
634
/*
 * Skylake-family IMC PCI match table — covers SKL, KBL, CFL, AML and
 * WHL client SKUs, all handled by the same IMC driver data.
 */
static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC: SKL Y */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: SKL U */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: SKL H dual */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: SKL H quad */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: SKL S dual */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: SKL S quad */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: KBL Y */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: KBL U */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: KBL U quad */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: KBL S dual */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: KBL S quad */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: KBL H quad */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: KBL workstation quad */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_WQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL U 2-core */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL U 4-core */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL H 4-core */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL H 6-core */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL S 2-core desktop */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL S 4-core desktop */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL S 6-core desktop */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL S 8-core desktop */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL S 4-core workstation */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL S 6-core workstation */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL S 8-core workstation */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL S 4-core server */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL S 6-core server */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: CFL S 8-core server */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: AML Y dual */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: AML Y quad */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: WHL U quad */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: WHL U 4-core */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC: WHL U dual */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
766
/* Ice Lake IMC PCI match table. */
static const struct pci_device_id icl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
778
/*
 * Name + ID table only for the SandyBridge IMC; no probe/remove callbacks
 * are set here — presumably filled in by the generic uncore core (TODO:
 * confirm against uncore.c).
 */
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};
783
/* IvyBridge IMC driver skeleton (name + ID table only). */
static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};
788
/* Haswell IMC driver skeleton (name + ID table only). */
static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};
793
/* Broadwell IMC driver skeleton (name + ID table only). */
static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};
798
/*
 * Skylake-era IMC driver skeleton; its ID table also covers the KBL/CFL/
 * AML/WHL derivatives (see desktop_imc_pci_ids, which routes all of those
 * generations here).
 */
static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};
803
/* Ice Lake IMC driver skeleton (name + ID table only). */
static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore",
	.id_table	= icl_uncore_pci_ids,
};
808
/*
 * One lookup entry: maps an Intel IMC PCI device ID to the pci_driver
 * that should be registered when that device is found on the system.
 */
struct imc_uncore_pci_dev {
	__u32 pci_id;			/* PCI device ID; vendor is implicitly Intel */
	struct pci_driver *driver;	/* uncore PCI driver handling this device */
};
/* Table-entry shorthand: token-pastes the PCI_DEVICE_ID_INTEL_ prefix onto 'a'. */
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
815
/*
 * Client (desktop/mobile) IMC device IDs and the uncore PCI driver that
 * services each.  imc_uncore_find_dev() scans this table in order and
 * returns the driver of the first ID present, so entries are grouped by
 * CPU generation; the list is terminated by a zero pci_id.
 */
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),    /* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),     /* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),    /* 10th Gen Core Mobile */
	{  /* end marker */ }
};
859
860
/*
 * Walk an imc_uncore_pci_dev table 't' with cursor 'x' until the
 * zero-terminated end marker (pci_id == 0).  Every use of a macro
 * parameter is parenthesized (CERT PRE01-C) so non-trivial lvalue
 * expressions can be passed safely.
 */
#define for_each_imc_pci_id(x, t) \
	for ((x) = (t); (x)->pci_id; (x)++)
863
864 static struct pci_driver *imc_uncore_find_dev(void)
865 {
866         const struct imc_uncore_pci_dev *p;
867         int ret;
868
869         for_each_imc_pci_id(p, desktop_imc_pci_ids) {
870                 ret = snb_pci2phy_map_init(p->pci_id);
871                 if (ret == 0)
872                         return p->driver;
873         }
874         return NULL;
875 }
876
877 static int imc_uncore_pci_init(void)
878 {
879         struct pci_driver *imc_drv = imc_uncore_find_dev();
880
881         if (!imc_drv)
882                 return -ENODEV;
883
884         uncore_pci_uncores = snb_pci_uncores;
885         uncore_pci_driver = imc_drv;
886
887         return 0;
888 }
889
/* SandyBridge entry point: defer to the shared client-IMC setup path. */
int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
894
/* IvyBridge entry point: defer to the shared client-IMC setup path. */
int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
/* Haswell entry point: defer to the shared client-IMC setup path. */
int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
903
/* Broadwell entry point: defer to the shared client-IMC setup path. */
int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
908
/* Skylake (and later client) entry point: defer to the shared client-IMC setup path. */
int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
913
914 /* end of Sandy Bridge uncore support */
915
916 /* Nehalem uncore support */
/* Stop all Nehalem uncore counting by clearing the global control MSR. */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}
921
/*
 * Re-arm the box: write the enable bits for all programmable counters
 * (EN_PC_ALL) and the fixed counter (EN_FC) into the global control MSR.
 */
static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}
926
927 static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
928 {
929         struct hw_perf_event *hwc = &event->hw;
930
931         if (hwc->idx < UNCORE_PMC_IDX_FIXED)
932                 wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
933         else
934                 wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
935 }
936
/*
 * Config-field format attributes for Nehalem (event, umask, edge, inv,
 * 8-bit cmask), collected into the "format" attribute group below.
 */
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};
945
/* Wraps the format attributes above under the name "format". */
static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};
950
/*
 * Named event aliases for the Nehalem uncore: clockticks, memory
 * controller (QMC) traffic, and home-logic (QHL) request breakdowns,
 * each expressed as an event/umask encoding string.
 */
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};
963
/*
 * MSR access ops for the Nehalem uncore.  disable_event is shared with
 * the SandyBridge implementation; box enable/disable and event enable
 * are NHM-specific (defined above).
 */
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
971
/*
 * Nehalem global uncore PMU type: a single box with eight 48-bit
 * programmable counters plus one 48-bit fixed counter, all accessed
 * through MSRs.
 */
static struct intel_uncore_type nhm_uncore = {
	.name		= "",	/* NOTE(review): empty — presumably yields the bare "uncore" PMU name; confirm in uncore.c */
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,	/* base of the per-counter control MSRs */
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,	/* base of the per-counter data MSRs */
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};
987
/* NULL-terminated list of MSR-based uncore types for Nehalem. */
static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};
992
/* Publish the Nehalem MSR uncore types to the generic uncore core. */
void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}
997
998 /* end of Nehalem uncore support */