#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
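
/*
 * Added note: rdmsr_safe_regs()/wrmsr_safe_regs() take an image of all
 * eight GPRs; as used above, gprs[1] is ECX (the MSR number), gprs[0] and
 * gprs[2] are EAX/EDX (the low/high halves of the value), and gprs[7] is
 * EDI, which must hold the 0x9c5a203a magic that unlocks AMD's passworded
 * MSR range on K8.
 */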

/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *		(Publication # 21266  Issue Date: August 1998)
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

extern __visible void vide(void);
__asm__(".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
		n = K6_BUG_LOOP;
		f_vide = vide;
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
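			/*
			 * Added note (derived from the computation above):
			 * bit 0 enables write allocation and the field above
			 * it holds the limit in 4 MB units, which is why
			 * mbytes is capped at 508. E.g. 256 MB gives
			 * l = 1 | (64 << 1) = 0x81.
			 */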
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
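			/*
			 * Added note (derived from the computation above):
			 * the new-style layout keeps the limit in 4 MB units
			 * but moves it to bits 31:22, with the enable in
			 * bit 16, raising the cap to 4092 MB.
			 */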
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
#ifdef CONFIG_SMP
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		node_id  = ecx & 0xff;
		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
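		/*
		 * Added note: per AMD's documentation of leaf 0x8000001e,
		 * ECX[7:0] is the node ID and EBX[15:8] is "threads per
		 * core - 1"; the +1 converts it to a sibling count.
		 */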

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * We may have multiple LLCs if L3 caches exist, so check if we
		 * have an L3 cache by looking at the L3 cache CPUID leaf.
		 */
		if (cpuid_edx(0x80000006)) {
			if (c->x86 == 0x17) {
				/*
				 * LLC is at the core complex level.
				 * Core complex id is ApicId[3].
				 */
				per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
			} else {
				/* LLC is at the node level. */
				per_cpu(cpu_llc_id, cpu) = node_id;
			}
		}
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes_per_socket > 1) {
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cus_per_node = c->x86_max_cores / nodes_per_socket;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cus_per_node;
	}
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores. Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}
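
/*
 * Worked example (added for illustration): with x86_coreid_bits == 2, an
 * initial APIC ID of 0b1110 decodes as core 2 (low two bits) in socket 3
 * (remaining bits), and that socket ID doubles as the LLC ID until
 * amd_get_topology() refines it.
 */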

u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			pr_debug("tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
		va_align.flags	  = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}
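
	/*
	 * Added note (assumes the 64K/2-way Fam15h L1I documented by AMD):
	 * cpuid_edx(0x80000005) reports the L1I size in KB in bits 31:24
	 * and its associativity in bits 23:16, so here upperbit =
	 * (64 << 10) / 2 = 32K and va_align.mask covers bits [14:12].
	 */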

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif
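
	/*
	 * Added note: bus 0, device 0x18, function 0 is the K8/Fam10h
	 * northbridge; its register 0x68 (Link Transaction Control, per the
	 * BKDG) carries the ApicExtBrdCst and ApicExtId bits tested above,
	 * both of which must be set for 8-bit APIC IDs to be delivered.
	 */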

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The "way access filter" has a performance penalty on some
	 * workloads. Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}
}
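
/*
 * Added note: MSR_F15H_IC_CFG is MSRC001_1021; writing 0x1E sets bits 4:1,
 * which AMD's guidance for these models uses to switch the way access
 * filter off, and the rdmsrl_safe() check above skips CPUs where firmware
 * already set any of those bits.
 */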

static void init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	}

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET */
	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if ((c->x86 == 6)) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
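
/*
 * Added note: in leaf 0x80000006, EBX[27:16]/EBX[11:0] give the L2 DTLB
 * and ITLB entry counts for 4K pages and EAX the same split for 2M/4M
 * pages, hence the 12-bit mask; leaf 0x80000005 reports the L1 TLBs in
 * 8-bit fields, hence mask = 0xff on K8.
 */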

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
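
/*
 * Worked example (added): AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf) packs
 * family 0xf into bits 31:24 and the (model << 4 | stepping) endpoints
 * 0x412 and 0xfff into bits 23:12 and 11:0 - the same "ms" encoding that
 * cpu_has_amd_erratum() computes below.
 */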

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			    osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}
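
/*
 * Added note: OSVW (OS Visible Workaround) lets the platform report erratum
 * status directly - MSR 0xc0010140 holds the number of valid status bits
 * and the 0xc0010141+ status MSRs one bit per OSVW ID - so the
 * family/model/stepping table is only the fallback path.
 */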

void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}