#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

/*
 *      TLB flushing, formerly SMP-only
 *              c/o Linus Torvalds.
 *
 *      These mean you can really definitely utterly forget about
 *      writing to user space from interrupts. (It's not allowed anyway).
 *
 *      Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *      More scalable flush, from Andi Kleen
 *
 *      Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

/*
 * We get here when we do something requiring a TLB invalidation
 * but could not go invalidate all of the contexts.  We do the
 * necessary invalidation by clearing out the 'ctx_id' which
 * forces a TLB flush when the context is loaded.
 */
void clear_asid_other(void)
{
        u16 asid;

        /*
         * This is only expected to be set if we have disabled
         * kernel _PAGE_GLOBAL pages.
         */
        if (!static_cpu_has(X86_FEATURE_PTI)) {
                WARN_ON_ONCE(1);
                return;
        }

        for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
                /* Do not need to flush the current asid */
                if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
                        continue;
                /*
                 * Make sure the next time we go to switch to
                 * this asid, we do a flush:
                 */
                this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
        }
        this_cpu_write(cpu_tlbstate.invalidate_other, false);
}

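/*
 * Source of fresh per-mm context IDs.  It starts at 1, so no mm ever
 * gets ctx_id 0; that lets clear_asid_other() use 0 above to mark an
 * ASID slot as stale.
 */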
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

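/*
 * Pick the ASID (PCID) slot to use on this CPU for 'next'.  On return,
 * *new_asid is the slot to load and *need_flush says whether the TLB
 * for that slot must be flushed: no PCID support, a recycled slot, or
 * a slot whose cached tlb_gen is behind next_tlb_gen.
 */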
static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
                            u16 *new_asid, bool *need_flush)
{
        u16 asid;

        if (!static_cpu_has(X86_FEATURE_PCID)) {
                *new_asid = 0;
                *need_flush = true;
                return;
        }

        if (this_cpu_read(cpu_tlbstate.invalidate_other))
                clear_asid_other();

        for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
                if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
                    next->context.ctx_id)
                        continue;

                *new_asid = asid;
                *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
                               next_tlb_gen);
                return;
        }

        /*
         * We don't currently own an ASID slot on this CPU.
         * Allocate a slot.
         */
        *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
        if (*new_asid >= TLB_NR_DYN_ASIDS) {
                *new_asid = 0;
                this_cpu_write(cpu_tlbstate.next_asid, 1);
        }
        *need_flush = true;
}

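/*
 * Build the CR3 value for 'pgdir' + 'new_asid' (with the noflush bit
 * set when no flush is needed) and load it.
 */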
static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
{
        unsigned long new_mm_cr3;

        if (need_flush) {
                invalidate_user_asid(new_asid);
                new_mm_cr3 = build_cr3(pgdir, new_asid);
        } else {
                new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
        }

        /*
         * Caution: many callers of this function expect
         * that load_cr3() is serializing and orders TLB
         * fills with respect to the mm_cpumask writes.
         */
        write_cr3(new_mm_cr3);
}

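/*
 * Switch this CPU away from its current user mm and onto init_mm.
 * Callers are expected to be in lazy TLB mode (or already running on
 * init_mm, in which case this is a no-op); see the WARN_ON() below.
 */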
void leave_mm(int cpu)
{
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

        /*
         * It's plausible that we're in lazy TLB mode while our mm is init_mm.
         * If so, our callers still expect us to flush the TLB, but there
         * aren't any user TLB entries in init_mm to worry about.
         *
         * This needs to happen before any other sanity checks due to
         * intel_idle's shenanigans.
         */
        if (loaded_mm == &init_mm)
                return;

        /* Warn if we're not lazy. */
        WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));

        switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}

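/*
 * Make sure the top-level page-table entry covering the current stack
 * is present in 'mm'.  With CONFIG_VMAP_STACK the stack lives in the
 * vmalloc area, and touching an unmapped stack after the CR3 switch
 * would double-fault.
 */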
static void sync_current_stack_to_mm(struct mm_struct *mm)
{
        unsigned long sp = current_stack_pointer;
        pgd_t *pgd = pgd_offset(mm, sp);

        if (CONFIG_PGTABLE_LEVELS > 4) {
                if (unlikely(pgd_none(*pgd))) {
                        pgd_t *pgd_ref = pgd_offset_k(sp);

                        set_pgd(pgd, *pgd_ref);
                }
        } else {
                /*
                 * "pgd" is faked.  The top level entries are "p4d"s, so sync
                 * the p4d.  This compiles to approximately the same code as
                 * the 5-level case.
                 */
                p4d_t *p4d = p4d_offset(pgd, sp);

                if (unlikely(p4d_none(*p4d))) {
                        pgd_t *pgd_ref = pgd_offset_k(sp);
                        p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);

                        set_p4d(p4d, *p4d_ref);
                }
        }
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
        u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
        unsigned cpu = smp_processor_id();
        u64 next_tlb_gen;

        /*
         * NB: The scheduler will call us with prev == next when switching
         * from lazy TLB mode to normal mode if active_mm isn't changing.
         * When this happens, we don't assume that CR3 (and hence
         * cpu_tlbstate.loaded_mm) matches next.
         *
         * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
         */

        /* We don't want flush_tlb_func_* to run concurrently with us. */
        if (IS_ENABLED(CONFIG_PROVE_LOCKING))
                WARN_ON_ONCE(!irqs_disabled());

        /*
         * Verify that CR3 is what we think it is.  This will catch
         * hypothetical buggy code that directly switches to swapper_pg_dir
         * without going through leave_mm() / switch_mm_irqs_off() or that
         * does something like write_cr3(read_cr3_pa()).
         *
         * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
         * isn't free.
         */
#ifdef CONFIG_DEBUG_VM
        if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
                /*
                 * If we were to BUG here, we'd be very likely to kill
                 * the system so hard that we don't see the call trace.
                 * Try to recover instead by ignoring the error and doing
                 * a global flush to minimize the chance of corruption.
                 *
                 * (This is far from being a fully correct recovery.
                 *  Architecturally, the CPU could prefetch something
                 *  back into an incorrect ASID slot and leave it there
                 *  to cause trouble down the road.  It's better than
                 *  nothing, though.)
                 */
                __flush_tlb_all();
        }
#endif
        this_cpu_write(cpu_tlbstate.is_lazy, false);

        if (real_prev == next) {
                VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
                           next->context.ctx_id);

                /*
                 * We don't currently support having a real mm loaded without
                 * our cpu set in mm_cpumask().  We have all the bookkeeping
                 * in place to figure out whether we would need to flush
                 * if our cpu were cleared in mm_cpumask(), but we don't
                 * currently use it.
                 */
                if (WARN_ON_ONCE(real_prev != &init_mm &&
                                 !cpumask_test_cpu(cpu, mm_cpumask(next))))
                        cpumask_set_cpu(cpu, mm_cpumask(next));

                return;
        } else {
                u16 new_asid;
                bool need_flush;
                u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);

                /*
                 * Avoid user/user BTB poisoning by flushing the branch
                 * predictor when switching between processes. This stops
                 * one process from doing Spectre-v2 attacks on another.
                 *
                 * As an optimization, flush indirect branches only when
                 * switching into processes that disable dumping. This
                 * protects high value processes like gpg, without incurring
                 * too much performance overhead. IBPB is *expensive*!
                 *
                 * This will not flush branches when switching into kernel
                 * threads. It will also not flush if we switch to the idle
                 * thread and back to the same process. It will flush if we
                 * switch to a different non-dumpable process.
                 */
                if (tsk && tsk->mm &&
                    tsk->mm->context.ctx_id != last_ctx_id &&
                    get_dumpable(tsk->mm) != SUID_DUMP_USER)
                        indirect_branch_prediction_barrier();

                if (IS_ENABLED(CONFIG_VMAP_STACK)) {
                        /*
                         * If our current stack is in vmalloc space and isn't
                         * mapped in the new pgd, we'll double-fault.  Forcibly
                         * map it.
                         */
                        sync_current_stack_to_mm(next);
                }

                /* Stop remote flushes for the previous mm */
                VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
                                real_prev != &init_mm);
                cpumask_clear_cpu(cpu, mm_cpumask(real_prev));

                /*
                 * Start remote flushes and then read tlb_gen.
                 */
                cpumask_set_cpu(cpu, mm_cpumask(next));
                next_tlb_gen = atomic64_read(&next->context.tlb_gen);

                choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

                if (need_flush) {
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
                        load_new_mm_cr3(next->pgd, new_asid, true);

                        /*
                         * NB: This gets called via leave_mm() in the idle path
                         * where RCU functions differently.  Tracing normally
                         * uses RCU, so we need to use the _rcuidle variant.
                         *
                         * (There is no good reason for this.  The idle code should
                         *  be rearranged to call this before rcu_idle_enter().)
                         */
                        trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                } else {
                        /* The new ASID is already up to date. */
                        load_new_mm_cr3(next->pgd, new_asid, false);

                        /* See above wrt _rcuidle. */
                        trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
                }

                /*
                 * Record last user mm's context id, so we can avoid
                 * flushing branch buffer with IBPB if we switch back
                 * to the same user.
                 */
                if (next != &init_mm)
                        this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);

                this_cpu_write(cpu_tlbstate.loaded_mm, next);
                this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
        }

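        /* Finally, load any mm-specific CR4 bits and switch the LDT if needed. */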
        load_mm_cr4(next);
        switch_ldt(real_prev, next);
}

/*
 * Please ignore the name of this function.  It should be called
 * switch_to_kernel_thread().
 *
 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
 * kernel thread or other context without an mm.  Acceptable implementations
 * include doing nothing whatsoever, switching to init_mm, or various clever
 * lazy tricks to try to minimize TLB flushes.
 *
 * The scheduler reserves the right to call enter_lazy_tlb() several times
 * in a row.  It will notify us that we're going back to a real mm by
 * calling switch_mm_irqs_off().
 */
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
                return;

        if (tlb_defer_switch_to_init_mm()) {
                /*
                 * There's a significant optimization that may be possible
                 * here.  We have accurate enough TLB flush tracking that we
                 * don't need to maintain coherence of TLB per se when we're
                 * lazy.  We do, however, need to maintain coherence of
                 * paging-structure caches.  We could, in principle, leave our
                 * old mm loaded and only switch to init_mm when
                 * tlb_remove_page() happens.
                 */
                this_cpu_write(cpu_tlbstate.is_lazy, true);
        } else {
                switch_mm(NULL, &init_mm, NULL);
        }
}

/*
 * Call this when reinitializing a CPU.  It fixes the following potential
 * problems:
 *
 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
 *   because the CPU was taken down and came back up with CR3's PCID
 *   bits clear).  CPU hotplug can do this.
 *
 * - The TLB contains junk in slots corresponding to inactive ASIDs.
 *
 * - The CPU went so far out to lunch that it may have missed a TLB
 *   flush.
 */
void initialize_tlbstate_and_flush(void)
{
        int i;
        struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
        unsigned long cr3 = __read_cr3();

        /* Assert that CR3 already references the right mm. */
        WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));

        /*
         * Assert that CR4.PCIDE is set if needed.  (CR4.PCIDE initialization
         * doesn't work like other CR4 bits because it can only be set from
         * long mode.)
         */
        WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
                !(cr4_read_shadow() & X86_CR4_PCIDE));

        /* Force ASID 0 and force a TLB flush. */
        write_cr3(build_cr3(mm->pgd, 0));

        /* Reinitialize tlbstate. */
        this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id);
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
        this_cpu_write(cpu_tlbstate.next_asid, 1);
        this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
        this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);

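        /* Mark every other ASID slot as stale so the next switch to it flushes. */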
        for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
                this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}

/*
 * flush_tlb_func_common()'s memory ordering requirement is that any
 * TLB fills that happen after we flush the TLB are ordered after we
 * read active_mm's tlb_gen.  We don't need any explicit barriers
 * because all x86 flush operations are serializing and the
 * atomic64_read operation won't be reordered by the compiler.
 */
static void flush_tlb_func_common(const struct flush_tlb_info *f,
                                  bool local, enum tlb_flush_reason reason)
{
        /*
         * We have three different tlb_gen values in here.  They are:
         *
         * - mm_tlb_gen:     the latest generation.
         * - local_tlb_gen:  the generation that this CPU has already caught
         *                   up to.
         * - f->new_tlb_gen: the generation that the requester of the flush
         *                   wants us to catch up to.
         */
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
        u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
        u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);

        /* This code cannot presently handle being reentered. */
        VM_WARN_ON(!irqs_disabled());

        if (unlikely(loaded_mm == &init_mm))
                return;

        VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
                   loaded_mm->context.ctx_id);

        if (this_cpu_read(cpu_tlbstate.is_lazy)) {
                /*
                 * We're in lazy mode.  We need to at least flush our
                 * paging-structure cache to avoid speculatively reading
                 * garbage into our TLB.  Since switching to init_mm is barely
                 * slower than a minimal flush, just switch to init_mm.
                 */
                switch_mm_irqs_off(NULL, &init_mm, NULL);
                return;
        }

        if (unlikely(local_tlb_gen == mm_tlb_gen)) {
                /*
                 * There's nothing to do: we're already up to date.  This can
                 * happen if two concurrent flushes happen -- the first flush to
                 * be handled can catch us all the way up, leaving no work for
                 * the second flush.
                 */
                trace_tlb_flush(reason, 0);
                return;
        }

        WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
        WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);

        /*
         * If we get to this point, we know that our TLB is out of date.
         * This does not strictly imply that we need to flush (it's
         * possible that f->new_tlb_gen <= local_tlb_gen), but we're
         * going to need to flush in the very near future, so we might
         * as well get it over with.
         *
         * The only question is whether to do a full or partial flush.
         *
         * We do a partial flush if requested and two extra conditions
         * are met:
         *
         * 1. f->new_tlb_gen == local_tlb_gen + 1.  We have an invariant that
         *    we've always done all needed flushes to catch up to
         *    local_tlb_gen.  If, for example, local_tlb_gen == 2 and
         *    f->new_tlb_gen == 3, then we know that the flush needed to bring
         *    us up to date for tlb_gen 3 is the partial flush we're
         *    processing.
         *
         *    As an example of why this check is needed, suppose that there
         *    are two concurrent flushes.  The first is a full flush that
         *    changes context.tlb_gen from 1 to 2.  The second is a partial
         *    flush that changes context.tlb_gen from 2 to 3.  If they get
         *    processed on this CPU in reverse order, we'll see
         *    local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
         *    If we were to use __flush_tlb_single() and set local_tlb_gen to
         *    3, we'd break the invariant: we'd update local_tlb_gen above
         *    1 without the full flush that's needed for tlb_gen 2.
         *
         * 2. f->new_tlb_gen == mm_tlb_gen.  This is purely an optimization.
         *    Partial TLB flushes are not all that much cheaper than full TLB
         *    flushes, so it seems unlikely that it would be a performance win
         *    to do a partial flush if that won't bring our TLB fully up to
         *    date.  By doing a full flush instead, we can increase
         *    local_tlb_gen all the way to mm_tlb_gen and we can probably
         *    avoid another flush in the very near future.
         */
        if (f->end != TLB_FLUSH_ALL &&
            f->new_tlb_gen == local_tlb_gen + 1 &&
            f->new_tlb_gen == mm_tlb_gen) {
                /* Partial flush */
                unsigned long addr;
                unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;

                addr = f->start;
                while (addr < f->end) {
                        __flush_tlb_single(addr);
                        addr += PAGE_SIZE;
                }
                if (local)
                        count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
                trace_tlb_flush(reason, nr_pages);
        } else {
                /* Full flush. */
                local_flush_tlb();
                if (local)
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                trace_tlb_flush(reason, TLB_FLUSH_ALL);
        }

        /* Both paths above update our state to mm_tlb_gen. */
        this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}

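/*
 * flush_tlb_func_local() and flush_tlb_func_remote() are thin wrappers
 * around flush_tlb_func_common(): the local variant runs on the CPU
 * that initiated the flush (with interrupts disabled by the caller),
 * the remote variant runs from the call-function IPI.
 */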
static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
        const struct flush_tlb_info *f = info;

        flush_tlb_func_common(f, true, reason);
}

static void flush_tlb_func_remote(void *info)
{
        const struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
                return;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (info->end == TLB_FLUSH_ALL)
                trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
        else
                trace_tlb_flush(TLB_REMOTE_SEND_IPI,
                                (info->end - info->start) >> PAGE_SHIFT);

        if (is_uv_system()) {
                /*
                 * This whole special case is confused.  UV has a "Broadcast
                 * Assist Unit", which seems to be a fancy way to send IPIs.
                 * Back when x86 used an explicit TLB flush IPI, UV was
                 * optimized to use its own mechanism.  These days, x86 uses
                 * smp_call_function_many(), but UV still uses a manual IPI,
                 * and that IPI's action is out of date -- it does a manual
                 * flush instead of calling flush_tlb_func_remote().  This
                 * means that the percpu tlb_gen variables won't be updated
                 * and we'll do pointless flushes on future context switches.
                 *
                 * Rather than hooking native_flush_tlb_others() here, I think
                 * that UV should be updated so that smp_call_function_many(),
                 * etc, are optimal on UV.
                 */
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, info);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func_remote,
                                               (void *)info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func_remote,
                               (void *)info, 1);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag)
{
        int cpu;

        struct flush_tlb_info info = {
                .mm = mm,
        };

        cpu = get_cpu();

        /* This is also a barrier that synchronizes with switch_mm(). */
        info.new_tlb_gen = inc_mm_tlb_gen(mm);

        /* Should we flush just the requested range? */
        if ((end != TLB_FLUSH_ALL) &&
            !(vmflag & VM_HUGETLB) &&
            ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
                info.start = start;
                info.end = end;
        } else {
                info.start = 0UL;
                info.end = TLB_FLUSH_ALL;
        }

        if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
                VM_WARN_ON(irqs_disabled());
                local_irq_disable();
                flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
                local_irq_enable();
        }

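        /* If any other CPU has this mm in its mm_cpumask, flush it there too. */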
        if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), &info);

        put_cpu();
}


static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* Flush the range one page at a time with 'invlpg' */
        for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
                __flush_tlb_one(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{

        /* Use the same ceiling heuristic as a user space task's flush; a bit conservative */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;
                info.start = start;
                info.end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}

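/*
 * Flush the TLB on every CPU recorded in 'batch' (built up by the
 * page-table unmap batching code).  Unlike flush_tlb_mm_range(), this
 * always does a full flush because the batch may span multiple mms.
 */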
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
        struct flush_tlb_info info = {
                .mm = NULL,
                .start = 0UL,
                .end = TLB_FLUSH_ALL,
        };

        int cpu = get_cpu();

        if (cpumask_test_cpu(cpu, &batch->cpumask)) {
                VM_WARN_ON(irqs_disabled());
                local_irq_disable();
                flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
                local_irq_enable();
        }

        if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
                flush_tlb_others(&batch->cpumask, &info);

        cpumask_clear(&batch->cpumask);

        put_cpu();
}

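/*
 * debugfs knob: tlb_single_page_flush_ceiling under the arch debugfs
 * directory (typically /sys/kernel/debug/x86/) can be read and written
 * at runtime to tune the partial-vs-full flush cutoff above.
 */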
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                 const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        int ceiling;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtoint(buf, 0, &ceiling))
                return -EINVAL;

        if (ceiling < 0)
                return -EINVAL;

        tlb_single_page_flush_ceiling = ceiling;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
        debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
        return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);