--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -682,6 +682,23 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}

+static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 mask;
+
+ if (p->is_write) {
+ mask = kvm_pmu_valid_counter_mask(vcpu);
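+ /* Drop bits for counters that are not implemented on this vcpu */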
+ kvm_pmu_software_increment(vcpu, p->regval & mask);
+ } else {
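+ /* PMSWINC_EL0 is write-only; reading it is UNDEFINED */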
+ kvm_inject_undefined(vcpu);
+ }
+
+ return true;
+}
+
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
/* DBGBVRn_EL1 */ \
@@ -892,7 +909,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
access_pmovs, NULL, PMOVSSET_EL0 },
/* PMSWINC_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
- trap_raz_wi },
+ access_pmswinc, reset_unknown, PMSWINC_EL0 },
/* PMSELR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
access_pmselr, reset_unknown, PMSELR_EL0 },
@@ -1231,6 +1248,7 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -40,6 +40,7 @@ u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
u64 select_idx);
#else
@@ -57,6 +58,7 @@ u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
+void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
u64 select_idx) {}
#endif
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -160,6 +160,37 @@ void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
kvm_vcpu_kick(vcpu);
}

+/**
+ * kvm_pmu_software_increment - do software increment
+ * @vcpu: The vcpu pointer
+ * @val: the value the guest writes to the PMSWINC register
+ */
+void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
+{
+ int i;
+ u64 type, enable, reg;
+
+ if (val == 0)
+ return;
+
+ for (i = 0; i < ARMV8_CYCLE_IDX; i++) {
+ if (!(val & BIT(i)))
+ continue;
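+ /* Increment only counters that are enabled and programmed with SW_INCR (event 0x0) */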
+ type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
+ & ARMV8_EVTYPE_EVENT;
+ enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+ if ((type == 0) && (enable & BIT(i))) {
+ reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+ reg = lower_32_bits(reg);
+ vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
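+ /* Event counters are 32 bits wide; a wrap to zero sets the overflow flag */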
+ if (!reg)
+ kvm_pmu_overflow_set(vcpu, BIT(i));
+ }
+ }
+}
+
static inline bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu,
u64 select_idx)
{
@@ -189,6 +220,10 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
kvm_pmu_stop_counter(vcpu, pmc);
eventsel = data & ARMV8_EVTYPE_EVENT;

+ /* The software increment event (0x0) doesn't need to be backed by a perf event */
+ if (eventsel == 0)
+ return;
+
memset(&attr, 0, sizeof(struct perf_event_attr));
attr.type = PERF_TYPE_RAW;
attr.size = sizeof(attr);