asedeno.scripts.mit.edu Git - linux.git/blobdiff - arch/s390/kvm/kvm-s390.c
KVM: Directly return result from kvm_arch_check_processor_compat()
[linux.git] / arch / s390 / kvm / kvm-s390.c
index 4638303ba6a858793eded0e331b67aeea67db5d0..7936af0a971f4ab93300be17e82127f5c7326841 100644 (file)
@@ -75,6 +75,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
        { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
+       { "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -177,6 +178,11 @@ static int hpage;
 module_param(hpage, int, 0444);
 MODULE_PARM_DESC(hpage, "1m huge page backing support");
 
+/* maximum percentage of steal time for polling.  >100 is treated like 100 */
+static u8 halt_poll_max_steal = 10;
+module_param(halt_poll_max_steal, byte, 0644);
+MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
+
 /*
  * For now we handle at most 16 double words as this is what the s390 base
  * kernel handles and stores in the prefix page. If we ever need to go beyond
@@ -221,6 +227,11 @@ int kvm_arch_hardware_enable(void)
        return 0;
 }
 
+int kvm_arch_check_processor_compat(void)
+{
+       return 0;
+}
+
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
                              unsigned long end);
 
@@ -321,6 +332,22 @@ static inline int plo_test_bit(unsigned char nr)
        return cc == 0;
 }
 
+static inline void __insn32_query(unsigned int opcode, u8 query[32])
+{
+       register unsigned long r0 asm("0") = 0; /* query function */
+       register unsigned long r1 asm("1") = (unsigned long) query;
+
+       asm volatile(
+               /* Parameter regs are ignored */
+               "       .insn   rrf,%[opc] << 16,2,4,6,0\n"
+               : "=m" (*query)
+               : "d" (r0), "a" (r1), [opc] "i" (opcode)
+               : "cc");
+}
+
+#define INSN_SORTL 0xb938
+#define INSN_DFLTCC 0xb939
+
 static void kvm_s390_cpu_feat_init(void)
 {
        int i;
@@ -368,6 +395,16 @@ static void kvm_s390_cpu_feat_init(void)
                __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
                              kvm_s390_available_subfunc.kma);
 
+       if (test_facility(155)) /* MSA9 */
+               __cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.kdsa);
+
+       if (test_facility(150)) /* SORTL */
+               __insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
+
+       if (test_facility(151)) /* DFLTCC */
+               __insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
+
        if (MACHINE_HAS_ESOP)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
        /*
@@ -507,15 +544,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
+       case KVM_CAP_MAX_VCPU_ID:
                r = KVM_S390_BSCA_CPU_SLOTS;
                if (!kvm_s390_use_sca_entries())
                        r = KVM_MAX_VCPUS;
                else if (sclp.has_esca && sclp.has_64bscao)
                        r = KVM_S390_ESCA_CPU_SLOTS;
                break;
-       case KVM_CAP_NR_MEMSLOTS:
-               r = KVM_USER_MEM_SLOTS;
-               break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
@@ -657,6 +692,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
                                set_kvm_facility(kvm->arch.model.fac_mask, 135);
                                set_kvm_facility(kvm->arch.model.fac_list, 135);
                        }
+                       if (test_facility(148)) {
+                               set_kvm_facility(kvm->arch.model.fac_mask, 148);
+                               set_kvm_facility(kvm->arch.model.fac_list, 148);
+                       }
+                       if (test_facility(152)) {
+                               set_kvm_facility(kvm->arch.model.fac_mask, 152);
+                               set_kvm_facility(kvm->arch.model.fac_list, 152);
+                       }
                        r = 0;
                } else
                        r = -EINVAL;
@@ -1323,6 +1366,19 @@ static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
        VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+       VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
+                ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
+                ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
+       VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
+                ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
+                ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
+                ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
+       VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
+                ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
+                ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
+                ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
 
        return 0;
 }
@@ -1491,6 +1547,19 @@ static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
        VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
                 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+       VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
+                ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
+                ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
+       VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
+                ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
+                ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
+                ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
+       VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
+                ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
+                ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
+                ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
 
        return 0;
 }
@@ -1546,6 +1615,19 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
        VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
                 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
                 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
+       VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
+                ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
+                ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
+       VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
+                ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
+                ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
+                ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
+       VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+                ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
+                ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
+                ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
+                ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
 
        return 0;
 }
@@ -2817,6 +2899,25 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
        vcpu->arch.enabled_gmap = vcpu->arch.gmap;
 }
 
+static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
+{
+       if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
+           test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
+               return true;
+       return false;
+}
+
+static bool kvm_has_pckmo_ecc(struct kvm *kvm)
+{
+       /* At least one ECC subfunction must be present */
+       return kvm_has_pckmo_subfunc(kvm, 32) ||
+              kvm_has_pckmo_subfunc(kvm, 33) ||
+              kvm_has_pckmo_subfunc(kvm, 34) ||
+              kvm_has_pckmo_subfunc(kvm, 40) ||
+              kvm_has_pckmo_subfunc(kvm, 41);
+
+}
+
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
 {
        /*
@@ -2829,13 +2930,19 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
        vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
        vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
        vcpu->arch.sie_block->eca &= ~ECA_APIE;
+       vcpu->arch.sie_block->ecd &= ~ECD_ECC;
 
        if (vcpu->kvm->arch.crypto.apie)
                vcpu->arch.sie_block->eca |= ECA_APIE;
 
        /* Set up protected key support */
-       if (vcpu->kvm->arch.crypto.aes_kw)
+       if (vcpu->kvm->arch.crypto.aes_kw) {
                vcpu->arch.sie_block->ecb3 |= ECB3_AES;
+               /* ecc is also wrapped with AES key */
+               if (kvm_has_pckmo_ecc(vcpu->kvm))
+                       vcpu->arch.sie_block->ecd |= ECD_ECC;
+       }
+
        if (vcpu->kvm->arch.crypto.dea_kw)
                vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
 }
@@ -3068,6 +3175,17 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
        }
 }
 
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+       /* do not poll with more than halt_poll_max_steal percent of steal time */
+       if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
+           halt_poll_max_steal) {
+               vcpu->stat.halt_no_poll_steal++;
+               return true;
+       }
+       return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
        /* kvm common code refers to this, but never calls it */
@@ -4412,21 +4530,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change)
 {
-       int rc;
-
-       /* If the basics of the memslot do not change, we do not want
-        * to update the gmap. Every update causes several unnecessary
-        * segment translation exceptions. This is usually handled just
-        * fine by the normal fault handler + gmap, but it will also
-        * cause faults on the prefix page of running guest CPUs.
-        */
-       if (old->userspace_addr == mem->userspace_addr &&
-           old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
-           old->npages * PAGE_SIZE == mem->memory_size)
-               return;
+       int rc = 0;
 
-       rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
-               mem->guest_phys_addr, mem->memory_size);
+       switch (change) {
+       case KVM_MR_DELETE:
+               rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+                                       old->npages * PAGE_SIZE);
+               break;
+       case KVM_MR_MOVE:
+               rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+                                       old->npages * PAGE_SIZE);
+               if (rc)
+                       break;
+               /* FALLTHROUGH */
+       case KVM_MR_CREATE:
+               rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+                                     mem->guest_phys_addr, mem->memory_size);
+               break;
+       case KVM_MR_FLAGS_ONLY:
+               break;
+       default:
+               WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
+       }
        if (rc)
                pr_warn("failed to commit memory region\n");
        return;