// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC		0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC		0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC		0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC		0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC		0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC		0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC		0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC		0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC		0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC		0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC		0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC		0x191f
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC		0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC		0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC		0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC		0x591f
#define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC		0x5910
#define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC		0x5918
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC		0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC		0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC		0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC	0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC	0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC	0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC	0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC	0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC	0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC	0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC	0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC	0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC	0x3e32
#define PCI_DEVICE_ID_INTEL_AML_YD_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_AML_YQ_IMC		0x590d
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC	0x3e34
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC		0x3e35
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC		0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC		0x8a12
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)
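/*
 * Note: the event-select layout is shared between NHM and SNB except for
 * the counter-mask width: SNB uses a 5-bit cmask in bits 28:24, while NHM
 * uses the full byte in bits 31:24 (hence the cmask5/cmask8 format
 * attributes defined below).
 */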
/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)

/* ICL Cbo register */
#define ICL_UNC_CBO_CONFIG			0x396
#define ICL_UNC_NUM_CBO_MASK			0xf
#define ICL_UNC_CBO_0_PER_CTR0			0x702
#define ICL_UNC_CBO_MSR_OFFSET			0x8
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
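/*
 * These attributes back the sysfs "format" directory of each uncore PMU,
 * telling the perf tool where to place the event/umask/edge/inv/cmask
 * fields in attr.config (e.g. "event=0xff,umask=0x00" fills config bits
 * 0-15).
 */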
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}
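/*
 * Note: SNB_UNC_PERF_GLOBAL_CTL is shared by all uncore PMUs in the
 * package, which is why only the first PMU instance (pmu_idx 0) writes
 * and clears it in the init/exit callbacks above.
 */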
static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};
static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}
static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};
static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};
void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
static struct intel_uncore_type icl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};
static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};
static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group icl_uncore_clock_format_group = {
	.name = "format",
	.attrs = icl_uncore_clock_formats_attr,
};
static struct intel_uncore_type icl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &skl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};
static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};
static int icl_get_cbox_num(void)
{
	u64 num_boxes;

	rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);

	return num_boxes & ICL_UNC_NUM_CBO_MASK;
}
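/*
 * Ice Lake reports the C-box count in the low bits of the CBO_CONFIG
 * MSR, so icl_uncore_cpu_init() below discovers num_boxes at boot
 * instead of hardcoding it per model as on SNB/SKL.
 */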
void icl_uncore_cpu_init(void)
{
	uncore_msr_uncores = icl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};
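/*
 * Each IMC count is one 64-byte cache line, so the scale factor is
 * 64 / 2^20 = 6.103515625e-5 MiB per count.  From the perf tool this
 * shows up as, e.g.:
 *
 *   perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ sleep 1
 */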
#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE
enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA		= 0,
	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA]     = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
};
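/*
 * The initializer above describes two 32-bit free-running counters
 * (data reads at 0x5050, data writes 0x4 bytes above it); the fields
 * are counter base, counter offset, box offset, counter count and
 * width, per struct freerunning_counters in uncore.h.
 */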
static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
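/*
 * The IMC counters are free-running and only 32 bits wide: they cannot
 * be started, stopped or reprogrammed, which is why the box/event hooks
 * below are empty, and why the box hrtimer polls them often enough that
 * a 32-bit wrap between two reads cannot be missed.
 */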
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.idx = idx;

	/* Convert to standard encoding format for freerunning counters */
	event->hw.config = ((cfg - 1) << 8) | 0x10ff;

	/* no group validation needed, we have free running counters */

	return 0;
}
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}
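/*
 * Client parts have a single package, so whichever bus the IMC device
 * answers on is simply mapped to physical package id 0 above.
 */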
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};
static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= uncore_mmio_read_counter,
};
static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 2,
	.num_boxes	= 1,
	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.freerunning	= snb_uncore_imc_freerunning,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};
static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};
static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_WQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AML_YQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WHL_UD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static const struct pci_device_id icl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore",
	.id_table	= icl_uncore_pci_ids,
};
struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
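/*
 * For example, IMC_DEV(SNB_IMC, &snb_uncore_pci_driver) expands to
 * { .pci_id = PCI_DEVICE_ID_INTEL_SNB_IMC, .driver = &snb_uncore_pci_driver }.
 */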
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	{  /* end marker */ }
};
#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)
static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}
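/*
 * All client IMCs share one uncore layout, so init below just picks
 * whichever per-generation pci_driver matched a present device and
 * hands it to the common uncore code.
 */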
static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}
int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};
static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */