@@ -400,7 +400,10 @@ static inline unsigned long map_pcr_to_cap(unsigned long pcr)
 		cap = H_GUEST_CAP_POWER9;
 		break;
 	case PCR_ARCH_31:
-		cap = H_GUEST_CAP_POWER10;
+		if (cpu_has_feature(CPU_FTR_P11_PVR))
+			cap = H_GUEST_CAP_POWER11;
+		else
+			cap = H_GUEST_CAP_POWER10;
 		break;
 	default:
 		break;
@@ -415,7 +418,7 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
 	/* We can (emulate) our own architecture version and anything older */
-	if (cpu_has_feature(CPU_FTR_ARCH_31))
+	if (cpu_has_feature(CPU_FTR_P11_PVR) || cpu_has_feature(CPU_FTR_ARCH_31))
 		host_pcr_bit = PCR_ARCH_31;
 	else if (cpu_has_feature(CPU_FTR_ARCH_300))
 		host_pcr_bit = PCR_ARCH_300;
@@ -2060,36 +2063,9 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 		fallthrough; /* go to facility unavailable handler */
 #endif
 
-	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
-		u64 cause = vcpu->arch.hfscr >> 56;
-
-		/*
-		 * Only pass HFU interrupts to the L1 if the facility is
-		 * permitted but disabled by the L1's HFSCR, otherwise
-		 * the interrupt does not make sense to the L1 so turn
-		 * it into a HEAI.
-		 */
-		if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
-		    (vcpu->arch.nested_hfscr & (1UL << cause))) {
-			ppc_inst_t pinst;
-			vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
-
-			/*
-			 * If the fetch failed, return to guest and
-			 * try executing it again.
-			 */
-			r = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
-			vcpu->arch.emul_inst = ppc_inst_val(pinst);
-			if (r != EMULATE_DONE)
-				r = RESUME_GUEST;
-			else
-				r = RESUME_HOST;
-		} else {
-			r = RESUME_HOST;
-		}
-
+	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+		r = RESUME_HOST;
 		break;
-	}
 
 	case BOOK3S_INTERRUPT_HV_RM_HARD:
 		vcpu->arch.trap = 0;
@@ -4154,7 +4130,7 @@ void kvmhv_set_l2_counters_status(int cpu, bool status)
 		lppaca_of(cpu).l2_counters_enable = 0;
 }
 
-int kmvhv_counters_tracepoint_regfunc(void)
+int kvmhv_counters_tracepoint_regfunc(void)
 {
 	int cpu;
 
@@ -4164,7 +4140,7 @@ int kvmhv_counters_tracepoint_regfunc(void)
 	return 0;
 }
 
-void kmvhv_counters_tracepoint_unregfunc(void)
+void kvmhv_counters_tracepoint_unregfunc(void)
 {
 	int cpu;
 
@@ -4309,6 +4285,15 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	}
 	hvregs.hdec_expiry = time_limit;
 
+	/*
+	 * hvregs has the doorbell status, so zero it here which
+	 * enables us to receive doorbells when H_ENTER_NESTED is
+	 * in progress for this vCPU
+	 */
+
+	if (vcpu->arch.doorbell_request)
+		vcpu->arch.doorbell_request = 0;
+
 	/*
 	 * When setting DEC, we must always deal with irq_work_raise
 	 * via NMI vs setting DEC. The problem occurs right as we
@@ -4900,7 +4885,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 			lpcr |= LPCR_MER;
 		}
 	} else if (vcpu->arch.pending_exceptions ||
-		   vcpu->arch.doorbell_request ||
 		   xive_interrupt_pending(vcpu)) {
 		vcpu->arch.ret = RESUME_HOST;
 		goto out;
0 commit comments