
Commit 6abe9c1

xzpeter authored and bonzini committed
KVM: X86: Move ignore_msrs handling upper the stack
MSR accesses can be one of:

  (1) KVM internal access,
  (2) userspace access (e.g., via KVM_SET_MSRS ioctl),
  (3) guest access.

The ignore_msrs handling was previously done in kvm_get_msr_common() and kvm_set_msr_common(), at the bottom of the MSR access stack. That works in most cases, but it can dump unwanted warning messages to dmesg even when KVM gets/sets MSRs internally through __kvm_set_msr() or __kvm_get_msr() (e.g., from kvm_cpuid()). Ideally we only want to trap cases (2) and (3) above, not (1).

To achieve this, move the ignore_msrs handling up to the callers of __kvm_get_msr() and __kvm_set_msr(). To identify the "MSR missing" event, a new return value (KVM_MSR_RET_INVALID==2) is introduced.

Signed-off-by: Peter Xu <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 02f5fb2
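The layering change described above is easy to see in miniature. Below is a minimal, compilable userspace sketch of the resulting control flow, with all kernel infrastructure stubbed out; __set_msr() and set_msr_ignored_check() are simplified stand-ins for __kvm_set_msr() and kvm_set_msr_ignored_check(), and the two booleans mirror the kvm.ignore_msrs and kvm.report_ignored_msrs module parameters.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KVM_MSR_RET_INVALID 2

static bool ignore_msrs = true;         /* mirrors kvm.ignore_msrs */
static bool report_ignored_msrs = true; /* mirrors kvm.report_ignored_msrs */

/* Bottom of the stack: reports a miss with a sentinel, knows no policy. */
static int __set_msr(uint32_t index, uint64_t data)
{
        (void)data;
        return index == 0x10 ? 0 /* a handled MSR */ : KVM_MSR_RET_INVALID;
}

/* Policy lives here, on the userspace/guest-facing paths only. */
static int set_msr_ignored_check(uint32_t index, uint64_t data)
{
        int ret = __set_msr(index, data);

        if (ret != KVM_MSR_RET_INVALID)
                return ret;
        if (ignore_msrs) {
                if (report_ignored_msrs)
                        fprintf(stderr, "ignored wrmsr: 0x%x\n", index);
                return 0;       /* mask the error */
        }
        fprintf(stderr, "unhandled wrmsr: 0x%x\n", index);
        return 1;               /* fail the caller */
}

int main(void)
{
        if (__set_msr(0xdead, 0) == KVM_MSR_RET_INVALID) {
                /* an internal caller (case (1)) handles the miss itself:
                 * no policy applied, nothing printed */
        }

        /* a userspace/guest-style caller (cases (2)/(3)): policy applies */
        return set_msr_ignored_check(0xdead, 0);
}

The point of the split: the bottom layer signals a miss with a sentinel instead of logging, so internal callers can probe MSRs without spamming dmesg, while the wrappers on the userspace/guest paths keep the old ignore_msrs behavior.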

File tree: 2 files changed, +56 −26 lines

arch/x86/kvm/x86.c

Lines changed: 54 additions & 26 deletions
@@ -243,6 +243,29 @@ static struct kmem_cache *x86_fpu_cache;
 
 static struct kmem_cache *x86_emulator_cache;
 
+/*
+ * When called, it means the previous get/set msr reached an invalid msr.
+ * Return 0 if we want to ignore/silent this failed msr access, or 1 if we want
+ * to fail the caller.
+ */
+static int kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
+                                 u64 data, bool write)
+{
+        const char *op = write ? "wrmsr" : "rdmsr";
+
+        if (ignore_msrs) {
+                if (report_ignored_msrs)
+                        vcpu_unimpl(vcpu, "ignored %s: 0x%x data 0x%llx\n",
+                                    op, msr, data);
+                /* Mask the error */
+                return 0;
+        } else {
+                vcpu_debug_ratelimited(vcpu, "unhandled %s: 0x%x data 0x%llx\n",
+                                       op, msr, data);
+                return 1;
+        }
+}
+
 static struct kmem_cache *kvm_alloc_emulator_cache(void)
 {
         unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
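Note that this helper centralizes the ignore_msrs policy that was previously duplicated at the bottom of kvm_set_msr_common() and kvm_get_msr_common() (removed in the last two hunks of this file): with the kvm.ignore_msrs module parameter set, a failed access is masked (return 0) and reported via vcpu_unimpl() only if kvm.report_ignored_msrs is also set; otherwise the access is logged rate-limited and fails (return 1).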
@@ -1516,6 +1539,17 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
         return kvm_x86_ops.set_msr(vcpu, &msr);
 }
 
+static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
+                                     u32 index, u64 data, bool host_initiated)
+{
+        int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
+
+        if (ret == KVM_MSR_RET_INVALID)
+                ret = kvm_msr_ignored_check(vcpu, index, data, true);
+
+        return ret;
+}
+
 /*
  * Read the MSR specified by @index into @data. Select MSR specific fault
  * checks are bypassed if @host_initiated is %true.
@@ -1537,15 +1571,29 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
         return ret;
 }
 
+static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
+                                     u32 index, u64 *data, bool host_initiated)
+{
+        int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
+
+        if (ret == KVM_MSR_RET_INVALID) {
+                /* Unconditionally clear *data for simplicity */
+                *data = 0;
+                ret = kvm_msr_ignored_check(vcpu, index, 0, false);
+        }
+
+        return ret;
+}
+
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 {
-        return __kvm_get_msr(vcpu, index, data, false);
+        return kvm_get_msr_ignored_check(vcpu, index, data, false);
 }
 EXPORT_SYMBOL_GPL(kvm_get_msr);
 
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 {
-        return __kvm_set_msr(vcpu, index, data, false);
+        return kvm_set_msr_ignored_check(vcpu, index, data, false);
 }
 EXPORT_SYMBOL_GPL(kvm_set_msr);
 
@@ -1665,12 +1713,12 @@ EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
  */
 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
-        return __kvm_get_msr(vcpu, index, data, true);
+        return kvm_get_msr_ignored_check(vcpu, index, data, true);
 }
 
 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
-        return __kvm_set_msr(vcpu, index, *data, true);
+        return kvm_set_msr_ignored_check(vcpu, index, *data, true);
 }
 
 #ifdef CONFIG_X86_64
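do_get_msr() and do_set_msr() sit on the userspace ioctl path (KVM_GET_MSRS/KVM_SET_MSRS), i.e. case (2) from the commit message, which is why they pass host_initiated=true. As a hedged sketch of exercising that path from userspace, assuming vcpu_fd is an already-created vCPU file descriptor and using a made-up MSR index (0xdeadbeef) chosen purely to fall through to the default case:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int poke_unhandled_msr(int vcpu_fd)
{
        struct {
                struct kvm_msrs hdr;
                struct kvm_msr_entry entry;
        } msrs;

        memset(&msrs, 0, sizeof(msrs));
        msrs.hdr.nmsrs = 1;
        msrs.entry.index = 0xdeadbeef;  /* hypothetical unhandled MSR */
        msrs.entry.data = 0;

        /*
         * KVM_SET_MSRS returns the number of MSRs processed: expect 1
         * (failure masked) with kvm.ignore_msrs=1, and 0 (write rejected)
         * otherwise.
         */
        return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs.hdr);
}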
@@ -3066,17 +3114,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                         return xen_hvm_config(vcpu, data);
                 if (kvm_pmu_is_valid_msr(vcpu, msr))
                         return kvm_pmu_set_msr(vcpu, msr_info);
-                if (!ignore_msrs) {
-                        vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
-                                    msr, data);
-                        return 1;
-                } else {
-                        if (report_ignored_msrs)
-                                vcpu_unimpl(vcpu,
-                                        "ignored wrmsr: 0x%x data 0x%llx\n",
-                                        msr, data);
-                        break;
-                }
+                return KVM_MSR_RET_INVALID;
         }
         return 0;
 }
@@ -3331,17 +3369,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         default:
                 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
                         return kvm_pmu_get_msr(vcpu, msr_info);
-                if (!ignore_msrs) {
-                        vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
-                                               msr_info->index);
-                        return 1;
-                } else {
-                        if (report_ignored_msrs)
-                                vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
-                                            msr_info->index);
-                        msr_info->data = 0;
-                }
-                break;
+                return KVM_MSR_RET_INVALID;
         }
         return 0;
 }

arch/x86/kvm/x86.h

Lines changed: 2 additions & 0 deletions
@@ -366,4 +366,6 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 
+#define KVM_MSR_RET_INVALID     2
+
 #endif
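A note on the value chosen: on these paths 0 already means success and 1 already means "fail the caller" (see kvm_msr_ignored_check() above), so the sentinel must be a third value, hence 2. Because the *_ignored_check() wrappers translate KVM_MSR_RET_INVALID back into 0 or 1 before returning, the value never escapes to existing callers and the define can stay private to x86.h.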
