/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte(addr, ptep, IPTE_LOCAL);
	else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}
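/*
 * Note (added comment, not in the original file): ptep_flush_direct()
 * always issues the ipte, using the CPU-local form only when this CPU is
 * the sole user of the mm. ptep_flush_lazy() goes one step further: if no
 * other CPU has the mm attached it merely marks the pte invalid and sets
 * mm->context.flush_mm, deferring the real TLB flush to a later point.
 */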
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}
static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}
static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}
static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}
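/*
 * Bit-layout note (added, not in the original file): a storage key byte is
 * ACC:4 F:1 R:1 C:1, i.e. _PAGE_ACC_BITS 0xf0, _PAGE_FP_BIT 0x08,
 * _PAGE_REFERENCED 0x04, _PAGE_CHANGED 0x02. Shifting the key left by 56
 * lands ACC/F on PGSTE_ACC_BITS/PGSTE_FP_BIT in the PGSTE's top byte,
 * while shifting R/C left by 48 lands them on PGSTE_GR_BIT/PGSTE_GC_BIT.
 * That is the arithmetic behind pgste_update_all() and pgste_set_key().
 */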
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}
static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}
static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
	return old;
}
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_direct(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);
pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);
pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);
void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (!MACHINE_HAS_NX)
		pte_val(pte) &= ~_PAGE_NOEXEC;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);
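/*
 * Usage sketch for the two-phase update above (hypothetical caller, not
 * part of this file): generic mm code brackets a protection change with
 * start/commit so the guest view kept in the PGSTE stays consistent while
 * the pte is transiently invalid.
 */
#if 0
static void modify_prot_example(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep)
{
	pte_t pte;

	pte = ptep_modify_prot_start(mm, addr, ptep);
	pte = pte_wrprotect(pte);	/* any protection change */
	ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif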
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte(addr, pmdp, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}
static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);
pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);
static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use _pmd_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pudp_idte(addr, pudp, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}
pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}
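/*
 * Usage sketch (hypothetical, not part of this file): the gmap/VSIE code
 * uses ptep_force_prot() to downgrade a guest mapping and arm the IPTE
 * notifier in one locked step, retrying the fault path on -EAGAIN:
 */
#if 0
	rc = ptep_force_prot(mm, addr, ptep, PROT_READ, PGSTE_IN_BIT);
	if (rc == -EAGAIN)
		/* current rights are incompatible, caller must re-fault */;
#endif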
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	ptep_flush_direct(mm, saddr, ptep);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key ACC and F, but set R/C */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return false;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return false;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return false;
	/* We can't run guests backed by huge pages, but userspace can
	 * still set them up and then try to migrate them without any
	 * migration support.
	 */
	if (pmd_large(*pmd))
		return true;

	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (unlikely(!ptep))
		return false;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
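/*
 * Note (added comment, not in the original file): this reports and clears
 * the PGSTE_UC_BIT "user dirty" state used for guest migration. If the
 * page was dirtied through a still-valid pte, that pte is re-marked
 * protected (or invalid on machines without ESOP) so that the next guest
 * write faults and sets PGSTE_UC_BIT again.
 */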
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul;
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
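/*
 * Usage sketch (hypothetical, not part of this file): a KVM intercept
 * handler for SSKE would translate the guest absolute address to a host
 * virtual address and update the key under mmap_sem:
 */
#if 0
	down_read(&current->mm->mmap_sem);
	rc = set_guest_storage_key(current->mm, vmaddr, key, nq);
	up_read(&current->mm->mmap_sem);
#endif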
/**
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);
/**
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;
	int cc = 0;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);
/**
 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 *
 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 *	   or < 0 in case of error. -EINVAL is returned for invalid values
 *	   of orc, -EFAULT for invalid addresses.
 */
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
			unsigned long *oldpte, unsigned long *oldpgste)
{
	unsigned long pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	int res = 0;

	WARN_ON_ONCE(orc > ESSA_MAX);
	if (unlikely(orc > ESSA_MAX))
		return -EINVAL;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	if (oldpte)
		*oldpte = pte_val(*ptep);
	if (oldpgste)
		*oldpgste = pgstev;

	switch (orc) {
	case ESSA_GET_STATE:
		break;
	case ESSA_SET_STABLE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		break;
	case ESSA_SET_UNUSED:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_POT_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
			break;
		}
		if (pgstev & _PGSTE_GPS_ZERO) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			break;
		}
		if (!(pgstev & PGSTE_GC_BIT)) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			res = 1;
			break;
		}
		break;
	case ESSA_SET_STABLE_RESIDENT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		/*
		 * Since the resident state can go away any time after this
		 * call, we will not make this page resident. We can revisit
		 * this decision if a guest will ever start using this.
		 */
		break;
	case ESSA_SET_STABLE_IF_RESIDENT:
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
			pgstev |= _PGSTE_GPS_USAGE_STABLE;
		}
		break;
	default:
		/* we should never get here! */
		break;
	}
	/* If we are discarding a page, set it to logical zero */
	if (res)
		pgstev |= _PGSTE_GPS_ZERO;

	pgste_val(pgste) = pgstev;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return res;
}
EXPORT_SYMBOL(pgste_perform_essa);
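/*
 * Usage sketch (hypothetical, not part of this file): the KVM handler for
 * the ESSA instruction applies the guest-requested state change and
 * collects pages flagged for the collaborative-memory backing list:
 */
#if 0
	res = pgste_perform_essa(mm, hva, ESSA_SET_UNUSED, NULL, NULL);
	if (res == 1)
		/* queue the page on the CBRL for the guest */;
	else if (res < 0)
		/* -EINVAL (bad orc) or -EFAULT (unmapped hva) */;
#endif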
/**
 * set_pgste_bits - set specific PGSTE bits.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @bits: a bitmask representing the bits that will be touched
 * @value: the values of the bits to be written. Only the bits in the mask
 *	   will be written.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
			unsigned long bits, unsigned long value)
{
	spinlock_t *ptl;
	pgste_t new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	new = pgste_get_lock(ptep);

	pgste_val(new) &= ~bits;
	pgste_val(new) |= value & bits;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_pgste_bits);
/**
 * get_pgste - get the current PGSTE for the given address.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @pgstep: will be written with the current PGSTE for the given address.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	*pgstep = pgste_val(pgste_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_pgste);
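/*
 * Usage sketch (hypothetical, not part of this file): CMMA state can be
 * migrated by reading a PGSTE and replaying selected bits on the target:
 */
#if 0
	unsigned long pgstev;

	if (!get_pgste(mm, hva, &pgstev))
		set_pgste_bits(mm, hva, _PGSTE_GPS_USAGE_MASK,
			       pgstev & _PGSTE_GPS_USAGE_MASK);
#endif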