			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			memslot->userspace_addr = userspace_addr;
		}
	}

	return 0;
}
    
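/*
 * Commit a memslot update.  For the legacy !user_alloc path, unmap the
 * kernel-allocated userspace mapping when the slot is being deleted
 * (npages == 0); then, under mmu_lock, recompute the shadow MMU page
 * budget (unless userspace pinned it) and write-protect the slot's
 * sptes so subsequent writes are refaulted and can be tracked.
 */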
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;

	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
		int ret;

		down_write(&current->mm->mmap_sem);
		ret = do_munmap(current->mm, old.userspace_addr,
				old.npages * PAGE_SIZE);
		up_write(&current->mm->mmap_sem);
		if (ret < 0)
			printk(KERN_WARNING
			       "kvm_vm_ioctl_set_memory_region: "
			       "failed to munmap memory\n");
	}

	spin_lock(&kvm->mmu_lock);
	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	spin_unlock(&kvm->mmu_lock);
}
    
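/*
 * Drop all shadow page tables and make every VCPU reload its MMU
 * state; used when the memslot layout changes under the guest.
 */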
void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
	kvm_reload_remote_mmus(kvm);
}
    
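/*
 * A VCPU is runnable if its mp_state is RUNNABLE, it has received a
 * SIPI, it has an NMI pending, or it has an interrupt pending that it
 * is currently allowed to take.
 */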
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
		|| vcpu->arch.nmi_pending ||
		(kvm_arch_interrupt_allowed(vcpu) &&
		 kvm_cpu_has_interrupt(vcpu));
}
    
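/*
 * Kick a VCPU: wake it if it is halted on its waitqueue, and if it is
 * running guest code on another online CPU, send a reschedule IPI to
 * force an exit from guest mode.  guest_mode is cleared with
 * atomic_xchg so that only one kicker sends the IPI.
 */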
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (atomic_xchg(&vcpu->guest_mode, 0))
			smp_send_reschedule(cpu);
	put_cpu();
}
    
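/*
 * Whether the VCPU can currently take an external interrupt; deferred
 * to the vendor backend (VMX or SVM).
 */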
    int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
    {
    	return kvm_x86_ops->interrupt_allowed(vcpu);
    }
    
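/*
 * Compare a linear RIP (CS base + RIP) against the VCPU's current
 * linear RIP; used to check whether the VCPU is still on the
 * instruction for which a single-step was armed.
 */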
    bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
    {
    	unsigned long current_rip = kvm_rip_read(vcpu) +
    		get_segment_base(vcpu, VCPU_SREG_CS);
    
    	return current_rip == linear_rip;
    }
    EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
    
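/*
 * Read the guest's RFLAGS, hiding the TF bit when it was set by the
 * host for guest single-stepping rather than by the guest itself.
 */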
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
    EXPORT_SYMBOL_GPL(kvm_get_rflags);
    
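/*
 * Write the guest's RFLAGS, re-arming TF when host single-stepping is
 * active and the VCPU is still on the instruction that set it up, then
 * request event reinjection since RFLAGS.IF may have changed.
 */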
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
    EXPORT_SYMBOL_GPL(kvm_set_rflags);
    
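/*
 * Export the kvm tracepoints so the vendor modules (kvm-intel,
 * kvm-amd) can emit them.
 */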
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);