		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
    	}
    
    
    	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
    
    
    	if (dbg->control & KVM_GUESTDBG_INJECT_DB)
    		kvm_queue_exception(vcpu, DB_VECTOR);
    	else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
		kvm_queue_exception(vcpu, BP_VECTOR);

	vcpu_put(vcpu);

	return r;
}

    /*
     * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
     * we have asm/x86/processor.h
     */
    struct fxsave {
    	u16	cwd;
    	u16	swd;
    	u16	twd;
    	u16	fop;
    	u64	rip;
    	u64	rdp;
    	u32	mxcsr;
    	u32	mxcsr_mask;
    	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
    #ifdef CONFIG_X86_64
    	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
    #else
    	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
    #endif
    };
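
/*
 * Illustrative sketch: the struct above is meant to mirror the hardware
 * FXSAVE image, so its fixed offsets could be asserted at build time
 * along these lines (the helper name is this sketch's own).
 */
#if 0
static inline void fxsave_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct fxsave, mxcsr) != 24);
	BUILD_BUG_ON(offsetof(struct fxsave, st_space) != 32);
}
#endif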
    
    
    /*
     * Translate a guest virtual address to a guest physical address.
     */
    int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
    				    struct kvm_translation *tr)
    {
    	unsigned long vaddr = tr->linear_address;
    	gpa_t gpa;
    
    	vcpu_load(vcpu);
    
    	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
    
    	tr->physical_address = gpa;
    	tr->valid = gpa != UNMAPPED_GVA;
    	tr->writeable = 1;
    	tr->usermode = 0;
    	vcpu_put(vcpu);
    
    	return 0;
    }
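
/*
 * Userspace-side sketch of the KVM_TRANSLATE ioctl served by the handler
 * above; it assumes a vcpu fd from KVM_CREATE_VCPU plus <linux/kvm.h>,
 * <sys/ioctl.h> and <errno.h>, and the helper name is illustrative only.
 */
#if 0
static int translate_gva(int vcpu_fd, __u64 gva, __u64 *gpa)
{
	struct kvm_translation tr = { .linear_address = gva };

	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) < 0)
		return -errno;
	if (!tr.valid)
		return -1;	/* the address is not mapped in the guest */
	*gpa = tr.physical_address;
	return 0;
}
#endif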
    
    
    int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
    {
    
    	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
    
    
    	vcpu_load(vcpu);
    
    	memcpy(fpu->fpr, fxsave->st_space, 128);
    	fpu->fcw = fxsave->cwd;
    	fpu->fsw = fxsave->swd;
    	fpu->ftwx = fxsave->twd;
    	fpu->last_opcode = fxsave->fop;
    	fpu->last_ip = fxsave->rip;
    	fpu->last_dp = fxsave->rdp;
    	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
    
    	vcpu_put(vcpu);
    
    	return 0;
    }
    
    int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
    {
    
    	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
    
    
    	vcpu_load(vcpu);
    
    	memcpy(fxsave->st_space, fpu->fpr, 128);
    	fxsave->cwd = fpu->fcw;
    	fxsave->swd = fpu->fsw;
    	fxsave->twd = fpu->ftwx;
    	fxsave->fop = fpu->last_opcode;
    	fxsave->rip = fpu->last_ip;
    	fxsave->rdp = fpu->last_dp;
    	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
    
    	vcpu_put(vcpu);
    
    	return 0;
    }
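
/*
 * Userspace-side sketch of the KVM_GET_FPU/KVM_SET_FPU ioctls served by
 * the two handlers above (same assumptions as the KVM_TRANSLATE sketch,
 * plus <err.h>): a VMM can snapshot the guest FXSAVE image and restore
 * it later, for example around save/restore or migration.
 */
#if 0
struct kvm_fpu fpu;

if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu) < 0)	/* read the guest image */
	err(1, "KVM_GET_FPU");
/* ... store, transfer or inspect the state ... */
if (ioctl(vcpu_fd, KVM_SET_FPU, &fpu) < 0)	/* write it back */
	err(1, "KVM_SET_FPU");
#endif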
    
    void fx_init(struct kvm_vcpu *vcpu)
    {
    	unsigned after_mxcsr_mask;
    
    
	/*
	 * Touch the fpu the first time in non-atomic context: if this is
	 * the first fpu instruction, the exception handler fires before
	 * the instruction returns and may have to allocate memory with
	 * GFP_KERNEL.
	 */
	if (!used_math())
		kvm_fx_save(&vcpu->arch.host_fx_image);
    
    	/* Initialize guest FPU by resetting ours and saving into guest's */
    	preempt_disable();
    
    	kvm_fx_save(&vcpu->arch.host_fx_image);
    	kvm_fx_finit();
    	kvm_fx_save(&vcpu->arch.guest_fx_image);
    	kvm_fx_restore(&vcpu->arch.host_fx_image);
    
    	vcpu->arch.cr0 |= X86_CR0_ET;
    
    	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
    
    	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
    }
    EXPORT_SYMBOL_GPL(fx_init);
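
/*
 * The two helpers below swap the guest and host FXSAVE images around guest
 * entry and exit: kvm_load_guest_fpu saves the host image and restores the
 * guest's before the guest runs, and kvm_put_guest_fpu does the reverse
 * once the guest's FPU state has to be written back out.
 */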
    
    void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
    {
    	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
    		return;
    
    	vcpu->guest_fpu_loaded = 1;
    
    	kvm_fx_save(&vcpu->arch.host_fx_image);
    	kvm_fx_restore(&vcpu->arch.guest_fx_image);
    
    }
    EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
    
    void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
    {
    	if (!vcpu->guest_fpu_loaded)
    		return;
    
    	vcpu->guest_fpu_loaded = 0;
    
    	kvm_fx_save(&vcpu->arch.guest_fx_image);
    	kvm_fx_restore(&vcpu->arch.host_fx_image);
    
    	++vcpu->stat.fpu_reload;
    
    }
    EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
    
    
    void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
    {
    	kvm_x86_ops->vcpu_free(vcpu);
    }
    
    struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
    						unsigned int id)
    {
    
    	return kvm_x86_ops->vcpu_create(kvm, id);
    }
    
    int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
    {
    	int r;
    
    
    	/* We do fxsave: this must be aligned. */
    
    	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
    
    	vcpu->arch.mtrr_state.have_fixed = 1;
    
    	vcpu_load(vcpu);
    	r = kvm_arch_vcpu_reset(vcpu);
    	if (r == 0)
    		r = kvm_mmu_setup(vcpu);
    	vcpu_put(vcpu);
    	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
    	vcpu_load(vcpu);
    	kvm_mmu_unload(vcpu);
    	vcpu_put(vcpu);
    
    	kvm_x86_ops->vcpu_free(vcpu);
    }
    
    int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
    {
    
    	vcpu->arch.nmi_pending = false;
    	vcpu->arch.nmi_injected = false;
    
    
    	vcpu->arch.switch_db_regs = 0;
    	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
    	vcpu->arch.dr6 = DR6_FIXED_1;
    	vcpu->arch.dr7 = DR7_FIXED_1;
    
    
    	return kvm_x86_ops->vcpu_reset(vcpu);
    }
    
    void kvm_arch_hardware_enable(void *garbage)
    {
    	kvm_x86_ops->hardware_enable(garbage);
    }
    
    void kvm_arch_hardware_disable(void *garbage)
    {
    	kvm_x86_ops->hardware_disable(garbage);
    }
    
    int kvm_arch_hardware_setup(void)
    {
    	return kvm_x86_ops->hardware_setup();
    }
    
    void kvm_arch_hardware_unsetup(void)
    {
    	kvm_x86_ops->hardware_unsetup();
    }
    
    void kvm_arch_check_processor_compat(void *rtn)
    {
    	kvm_x86_ops->check_processor_compatibility(rtn);
    }
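
/*
 * kvm_arch_vcpu_init allocates the x86-specific per-vcpu state that the
 * generic KVM code does not know about: the pio_data page, the shadow MMU
 * state and, when the interrupt chip is emulated in the kernel, the local
 * APIC. The fail_* labels unwind those allocations in reverse order.
 */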
    
    int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
    {
    	struct page *page;
    	struct kvm *kvm;
    	int r;
    
    	BUG_ON(vcpu->kvm == NULL);
    	kvm = vcpu->kvm;
    
    
    	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
    
	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
    
    
    	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
    	if (!page) {
    		r = -ENOMEM;
    		goto fail;
    	}
    
    	vcpu->arch.pio_data = page_address(page);
    
    
    	r = kvm_mmu_create(vcpu);
    	if (r < 0)
    		goto fail_free_pio_data;
    
    	if (irqchip_in_kernel(kvm)) {
    		r = kvm_create_lapic(vcpu);
    		if (r < 0)
    			goto fail_mmu_destroy;
    	}
    
    	return 0;
    
    fail_mmu_destroy:
    	kvm_mmu_destroy(vcpu);
    fail_free_pio_data:
    
    	free_page((unsigned long)vcpu->arch.pio_data);
    
    fail:
    	return r;
    }
    
    void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
    {
    	kvm_free_lapic(vcpu);
    
	down_read(&vcpu->kvm->slots_lock);
	kvm_mmu_destroy(vcpu);
	up_read(&vcpu->kvm->slots_lock);
	free_page((unsigned long)vcpu->arch.pio_data);
}

    struct  kvm *kvm_arch_create_vm(void)
    {
    	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
    
    	if (!kvm)
    		return ERR_PTR(-ENOMEM);
    
    
    	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
    
    	INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
    
    	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
    
    	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
    	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
    
    
    	return kvm;
    }
    
    static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
    {
    	vcpu_load(vcpu);
    	kvm_mmu_unload(vcpu);
    	vcpu_put(vcpu);
    }
    
    static void kvm_free_vcpus(struct kvm *kvm)
    {
    	unsigned int i;
    
    	/*
    	 * Unpin any mmu pages first.
    	 */
    	for (i = 0; i < KVM_MAX_VCPUS; ++i)
    		if (kvm->vcpus[i])
    			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
    	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
    		if (kvm->vcpus[i]) {
    			kvm_arch_vcpu_free(kvm->vcpus[i]);
    			kvm->vcpus[i] = NULL;
    		}
    	}
    
    }
    
    
    void kvm_arch_sync_events(struct kvm *kvm)
    {
    
	kvm_free_all_assigned_devices(kvm);
}

    void kvm_arch_destroy_vm(struct kvm *kvm)
    {
    
    	kvm_iommu_unmap_guest(kvm);
    
    	kvm_free_pit(kvm);
    
    	kfree(kvm->arch.vpic);
    	kfree(kvm->arch.vioapic);
    
    	kvm_free_vcpus(kvm);
    	kvm_free_physmem(kvm);
    
    	if (kvm->arch.apic_access_page)
    		put_page(kvm->arch.apic_access_page);
    
    	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	kfree(kvm);
}

    int kvm_arch_set_memory_region(struct kvm *kvm,
    				struct kvm_userspace_memory_region *mem,
    				struct kvm_memory_slot old,
    				int user_alloc)
    {
    	int npages = mem->memory_size >> PAGE_SHIFT;
    	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
    
	/*
	 * To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
    	if (!user_alloc) {
    		if (npages && !old.rmap) {
    
    			unsigned long userspace_addr;
    
    
			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

    			if (IS_ERR((void *)userspace_addr))
    				return PTR_ERR((void *)userspace_addr);
    
    			/* set userspace_addr atomically for kvm_hva_to_rmapp */
    			spin_lock(&kvm->mmu_lock);
    			memslot->userspace_addr = userspace_addr;
    			spin_unlock(&kvm->mmu_lock);
    
    		} else {
    			if (!old.user_alloc && old.rmap) {
    				int ret;
    
    
				down_write(&current->mm->mmap_sem);
				ret = do_munmap(current->mm, old.userspace_addr,
						old.npages * PAGE_SIZE);
				up_write(&current->mm->mmap_sem);

    				if (ret < 0)
    					printk(KERN_WARNING
    				       "kvm_vm_ioctl_set_memory_region: "
    				       "failed to munmap memory\n");
    			}
    		}
    	}
    
    
    	if (!kvm->arch.n_requested_mmu_pages) {
    
    		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
    		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
    	}
    
    	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
    	kvm_flush_remote_tlbs(kvm);
    
    	return 0;
    }
    
    void kvm_arch_flush_shadow(struct kvm *kvm)
    {
    	kvm_mmu_zap_all(kvm);
    }
    
    
    int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
    {
    
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
	       || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
	       || vcpu->arch.nmi_pending;
}

    
    static void vcpu_kick_intr(void *info)
    {
    #ifdef DEBUG
    	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
    	printk(KERN_DEBUG "vcpu_kick_intr %p \n", vcpu);
    #endif
    }
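
/*
 * kvm_vcpu_kick wakes a vcpu sleeping on its wait queue and, if the vcpu
 * is currently executing guest code on another CPU, sends an IPI to that
 * CPU. The IPI handler (vcpu_kick_intr above) is deliberately empty: the
 * interrupt itself is enough to force a VM exit.
 */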
    
    void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
    {
    	int ipi_pcpu = vcpu->cpu;
    
    	int cpu = get_cpu();
    
    
    	if (waitqueue_active(&vcpu->wq)) {
    		wake_up_interruptible(&vcpu->wq);
    		++vcpu->stat.halt_wakeup;
    	}
    
	/*
	 * We may be called synchronously with irqs disabled in guest mode,
	 * so we do not call smp_call_function_single() in that case.
	 */
	if (vcpu->guest_mode && vcpu->cpu != cpu)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);