KVM: Don't pass kvm_run arguments
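The headline change in this diff removes the struct kvm_run * parameters that were threaded through the x86 exit-handling and emulation paths: since the run area is reachable as vcpu->run, each callee now looks it up from the vcpu itself. A minimal sketch of the before/after calling pattern, using simplified stand-in structures rather than the real KVM definitions:

/* Sketch only: simplified stand-ins for the KVM structures. */
struct kvm_run { int exit_reason; };
struct kvm_vcpu { struct kvm_run *run; };

/* Before: the run pointer rides along as an extra parameter. */
static void report_exit_old(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run, int reason)
{
	kvm_run->exit_reason = reason;
}

/* After: the callee fetches the run area from the vcpu when needed. */
static void report_exit_new(struct kvm_vcpu *vcpu, int reason)
{
	vcpu->run->exit_reason = reason;
}

Functions that touch the run area repeatedly can cache the pointer once, as post_kvm_run_save() does further down with struct kvm_run *kvm_run = vcpu->run;.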
[safe/jmp/linux-2.6] / arch/x86/kvm/x86.c
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index be451ee..1687d12 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1591,6 +1591,8 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
 	if (cpuid->nent < 1)
 		goto out;
+	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+		cpuid->nent = KVM_MAX_CPUID_ENTRIES;
 	r = -ENOMEM;
 	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
 	if (!cpuid_entries)
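The hunk above clamps the userspace-supplied cpuid->nent before it is used to size the vmalloc() of cpuid entries, so an oversized count cannot drive an arbitrarily large allocation. A hedged sketch of the clamp-before-allocate pattern; MAX_ENTRIES and bounded_alloc_size() are illustrative names, not KVM symbols:

#include <stddef.h>

#define MAX_ENTRIES 80	/* stand-in limit, playing the role of KVM_MAX_CPUID_ENTRIES */

static size_t bounded_alloc_size(unsigned int nent, size_t entry_size)
{
	if (nent > MAX_ENTRIES)
		nent = MAX_ENTRIES;	/* silently cap, as the hunk above does */
	return (size_t)nent * entry_size;
}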
@@ -1690,7 +1692,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
 	unsigned bank_num = mcg_cap & 0xff, bank;
 
 	r = -EINVAL;
-	if (!bank_num)
+	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
 		goto out;
 	if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
 		goto out;
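Here the bank count taken from the low byte of mcg_cap is rejected not only when it is zero but also when it is at or above KVM_MAX_MCE_BANKS, bounding the count before any per-bank MCE state is set up. A small illustrative sketch of the same validation (the names and the limit value are stand-ins, not the kernel's):

#define MAX_MCE_BANKS 32	/* stand-in for KVM_MAX_MCE_BANKS */

static int check_mce_banks(unsigned long long mcg_cap, unsigned long long *banks)
{
	unsigned int bank_num = mcg_cap & 0xff;	/* low byte encodes the bank count */
	unsigned int bank;

	if (!bank_num || bank_num >= MAX_MCE_BANKS)
		return -1;	/* reject zero or out-of-range counts up front */
	for (bank = 0; bank < bank_num; bank++)
		banks[bank] = 0;	/* per-bank state is only touched after validation */
	return 0;
}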
@@ -2755,13 +2757,13 @@ static void cache_all_regs(struct kvm_vcpu *vcpu)
 }
 
 int emulate_instruction(struct kvm_vcpu *vcpu,
-			struct kvm_run *run,
 			unsigned long cr2,
 			u16 error_code,
 			int emulation_type)
 {
 	int r, shadow_mask;
 	struct decode_cache *c;
+	struct kvm_run *run = vcpu->run;
 
 	kvm_clear_exception_queue(vcpu);
 	vcpu->arch.mmio_fault_cr2 = cr2;
@@ -2967,8 +2969,7 @@ static int pio_string_write(struct kvm_vcpu *vcpu)
 	return r;
 }
 
-int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
-		  int size, unsigned port)
+int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
 {
 	unsigned long val;
 
@@ -2997,7 +2998,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_pio);
 
-int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
 		  int size, unsigned long count, int down,
 		  gva_t address, int rep, unsigned port)
 {
@@ -3451,17 +3452,17 @@ EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
  *
  * No need to exit to userspace if we already have an interrupt queued.
  */
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
-					struct kvm_run *kvm_run)
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
 {
 	return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
-		kvm_run->request_interrupt_window &&
+		vcpu->run->request_interrupt_window &&
 		kvm_arch_interrupt_allowed(vcpu));
 }
 
-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
-			      struct kvm_run *kvm_run)
+static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *kvm_run = vcpu->run;
+
 	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
@@ -3523,7 +3524,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
-static void inject_pending_event(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void inject_pending_event(struct kvm_vcpu *vcpu)
 {
 	/* try to reinject previous events if any */
 	if (vcpu->arch.exception.pending) {
@@ -3559,11 +3560,11 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 }
 
-static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
-		kvm_run->request_interrupt_window;
+		vcpu->run->request_interrupt_window;
 
 	if (vcpu->requests)
 		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
@@ -3584,12 +3585,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 			kvm_x86_ops->tlb_flush(vcpu);
 		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
 				       &vcpu->requests)) {
-			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
+			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
 			r = 0;
 			goto out;
 		}
 		if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
-			kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
 			r = 0;
 			goto out;
 		}
@@ -3613,7 +3614,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		goto out;
 	}
 
-	inject_pending_event(vcpu, kvm_run);
+	inject_pending_event(vcpu);
 
 	/* enable NMI/IRQ window open exits if needed */
 	if (vcpu->arch.nmi_pending)
@@ -3639,7 +3640,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	trace_kvm_entry(vcpu->vcpu_id);
-	kvm_x86_ops->run(vcpu, kvm_run);
+	kvm_x86_ops->run(vcpu);
 
 	if (unlikely(vcpu->arch.switch_db_regs || test_thread_flag(TIF_DEBUG))) {
 		set_debugreg(current->thread.debugreg0, 0);
@@ -3680,13 +3681,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	kvm_lapic_sync_from_vapic(vcpu);
 
-	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+	r = kvm_x86_ops->handle_exit(vcpu);
 out:
 	return r;
 }
 
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -3706,7 +3707,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	r = 1;
 	while (r > 0) {
 		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-			r = vcpu_enter_guest(vcpu, kvm_run);
+			r = vcpu_enter_guest(vcpu);
 		else {
 			up_read(&vcpu->kvm->slots_lock);
 			kvm_vcpu_block(vcpu);
@@ -3734,14 +3735,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		if (kvm_cpu_has_pending_timer(vcpu))
 			kvm_inject_pending_timer_irqs(vcpu);
 
-		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+		if (dm_request_for_irq_injection(vcpu)) {
 			r = -EINTR;
-			kvm_run->exit_reason = KVM_EXIT_INTR;
+			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.request_irq_exits;
 		}
 		if (signal_pending(current)) {
 			r = -EINTR;
-			kvm_run->exit_reason = KVM_EXIT_INTR;
+			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.signal_exits;
 		}
 		if (need_resched()) {
@@ -3752,7 +3753,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	up_read(&vcpu->kvm->slots_lock);
-	post_kvm_run_save(vcpu, kvm_run);
+	post_kvm_run_save(vcpu);
 
 	vapic_exit(vcpu);
@@ -3792,8 +3793,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_needed = 0;
 		down_read(&vcpu->kvm->slots_lock);
-		r = emulate_instruction(vcpu, kvm_run,
-					vcpu->arch.mmio_fault_cr2, 0,
+		r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
 					EMULTYPE_NO_DECODE);
 		up_read(&vcpu->kvm->slots_lock);
 		if (r == EMULATE_DO_MMIO) {
@@ -3809,7 +3809,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		kvm_register_write(vcpu, VCPU_REGS_RAX,
 				   kvm_run->hypercall.ret);
 
-	r = __vcpu_run(vcpu, kvm_run);
+	r = __vcpu_run(vcpu);
 
 out:
 	if (vcpu->sigset_active)
@@ -4049,7 +4049,7 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 	return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
 }
 
-static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
+static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
 			     struct desc_struct *seg_desc)
 {
 	u32 base_addr = get_desc_base(seg_desc);
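The final hunk widens get_tss_base_addr()'s return type from u32 to gpa_t, KVM's 64-bit guest-physical-address type, so a translated address that does not fit in 32 bits is no longer truncated on return. A toy, self-contained illustration of that truncation risk; as_u32() is a hypothetical helper, not kernel code:

#include <stdio.h>

typedef unsigned long long gpa_t;	/* stand-in for KVM's 64-bit gpa_t */

/* A 32-bit return type silently drops the upper bits of any guest
 * physical address at or above 4 GiB. */
static unsigned int as_u32(gpa_t gpa)
{
	return (unsigned int)gpa;
}

int main(void)
{
	gpa_t gpa = 0x100001000ULL;			/* just above 4 GiB */
	printf("%llx -> %x\n", gpa, as_u32(gpa));	/* prints 100001000 -> 1000 */
	return 0;
}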