// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>

#include <asm/apic.h>

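/* Encode the destination APIC ID into the ICR2 destination field (bits 24-31) */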
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

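/* Poll the ICR delivery status bit until the previously queued IPI is accepted */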
static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid costly
	 * cli/sti. Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. Also the destination
	 * mode is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

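/* Deliver @vector to a single CPU as a physical mode unicast IPI */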
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

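/* Deliver @vector to every CPU in @mask, one physical mode unicast per CPU */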
void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

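/* Same as default_send_IPI_mask_sequence_phys(), but skip the sending CPU */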
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
					   int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

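/* Logical destination mode variant: one unicast IPI per CPU in @mask */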
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
					    int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
					      int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines: flat logical destination mode
 * addresses at most eight CPUs, so the whole mask fits in a single IPI.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, thus avoid sending IPIs in this case.
	 */
	if (num_online_cpus() < 2)
		return;

	if (no_broadcast || vector == NMI_VECTOR) {
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
	} else {
		__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
	}
}

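/* Deliver @vector to all online CPUs, including the sender */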
void default_send_IPI_all(int vector)
{
	if (no_broadcast || vector == NMI_VECTOR) {
		apic->send_IPI_mask(cpu_online_mask, vector);
	} else {
		__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
	}
}

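/* Deliver @vector to the sending CPU itself via the self shorthand */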
void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

/* must come after the send_IPI functions above for inlining */
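/* Map a physical APIC ID to its Linux CPU number, or -1 if no CPU matches */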
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

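/*
 * Resolve the current CPU number from the hardware APIC ID rather than the
 * regular current-CPU accounting; falls back to CPU 0 if the APIC is absent
 * or the APIC ID cannot be mapped to a CPU.
 */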
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}