  • 	if (vcpu->requests)
    		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
    			kvm_x86_ops->tlb_flush(vcpu);
    
    	kvm_x86_ops->run(vcpu, kvm_run);
    
    	vcpu->guest_mode = 0;
    	local_irq_enable();
    
    	++vcpu->stat.exits;
    
    	/*
    	 * We must have an instruction between local_irq_enable() and
    	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
    	 * the interrupt shadow.  The stat.exits increment will do nicely.
    	 * But we need to prevent reordering, hence this barrier():
    	 */
    	barrier();
    
    	kvm_guest_exit();
    
    	preempt_enable();
    
    	/*
    	 * Profile KVM exit RIPs:
    	 */
    	if (unlikely(prof_on == KVM_PROFILING)) {
    		kvm_x86_ops->cache_regs(vcpu);
    		profile_hit(KVM_PROFILING, (void *)vcpu->rip);
    	}
    
    	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
    
    	if (r > 0) {
    		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
    			r = -EINTR;
    			kvm_run->exit_reason = KVM_EXIT_INTR;
    			++vcpu->stat.request_irq_exits;
    			goto out;
    		}
    
		if (!need_resched())
    			goto again;
    	}
    
    out:
    	if (r > 0) {
    		kvm_resched(vcpu);
    		goto preempted;
    	}
    
    	post_kvm_run_save(vcpu, kvm_run);
    
    	return r;
    }
    
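/*
 * Top-level handler for the KVM_RUN ioctl: load the vcpu, block vcpus
 * that are still uninitialized, swap in the guest's signal mask,
 * complete any PIO/MMIO emulation left over from the previous exit to
 * userspace, pick up a pending hypercall return value, and then enter
 * the __vcpu_run() loop.
 */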
    int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
    {
    	int r;
    	sigset_t sigsaved;
    
    	vcpu_load(vcpu);
    
    	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
    		kvm_vcpu_block(vcpu);
    		vcpu_put(vcpu);
    		return -EAGAIN;
    	}
    
    	if (vcpu->sigset_active)
    		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
    
    	/* re-sync apic's tpr */
    	if (!irqchip_in_kernel(vcpu->kvm))
    		set_cr8(vcpu, kvm_run->cr8);
    
    	if (vcpu->pio.cur_count) {
    		r = complete_pio(vcpu);
    		if (r)
    			goto out;
    	}
#ifdef CONFIG_HAS_IOMEM
    	if (vcpu->mmio_needed) {
    		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
    		vcpu->mmio_read_completed = 1;
    		vcpu->mmio_needed = 0;
    		r = emulate_instruction(vcpu, kvm_run,
    					vcpu->mmio_fault_cr2, 0, 1);
    		if (r == EMULATE_DO_MMIO) {
    			/*
    			 * Read-modify-write.  Back to userspace.
    			 */
    			r = 0;
    			goto out;
    		}
    	}
    #endif
    	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
    		kvm_x86_ops->cache_regs(vcpu);
    		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
    		kvm_x86_ops->decache_regs(vcpu);
    	}
    
    	r = __vcpu_run(vcpu, kvm_run);
    
    out:
    	if (vcpu->sigset_active)
    		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
    
    	vcpu_put(vcpu);
    	return r;
    }
    
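/*
 * KVM_GET_REGS: copy the general-purpose registers, rip and rflags out
 * to userspace.  Single-step flags are masked so that guest-debugging
 * state does not leak into the saved rflags.
 */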
    int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
    {
    	vcpu_load(vcpu);
    
    	kvm_x86_ops->cache_regs(vcpu);
    
    	regs->rax = vcpu->regs[VCPU_REGS_RAX];
    	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
    	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
    	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
    	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
    	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
    	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
    	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
    #ifdef CONFIG_X86_64
    	regs->r8 = vcpu->regs[VCPU_REGS_R8];
    	regs->r9 = vcpu->regs[VCPU_REGS_R9];
    	regs->r10 = vcpu->regs[VCPU_REGS_R10];
    	regs->r11 = vcpu->regs[VCPU_REGS_R11];
    	regs->r12 = vcpu->regs[VCPU_REGS_R12];
    	regs->r13 = vcpu->regs[VCPU_REGS_R13];
    	regs->r14 = vcpu->regs[VCPU_REGS_R14];
    	regs->r15 = vcpu->regs[VCPU_REGS_R15];
    #endif
    
    	regs->rip = vcpu->rip;
    	regs->rflags = kvm_x86_ops->get_rflags(vcpu);
    
    	/*
    	 * Don't leak debug flags in case they were set for guest debugging
    	 */
    	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
    		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
    
    	vcpu_put(vcpu);
    
    	return 0;
    }
    
    int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
    {
    	vcpu_load(vcpu);
    
    	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
    	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
    	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
    	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
    	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
    	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
    	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
    	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
    #ifdef CONFIG_X86_64
    	vcpu->regs[VCPU_REGS_R8] = regs->r8;
    	vcpu->regs[VCPU_REGS_R9] = regs->r9;
    	vcpu->regs[VCPU_REGS_R10] = regs->r10;
    	vcpu->regs[VCPU_REGS_R11] = regs->r11;
    	vcpu->regs[VCPU_REGS_R12] = regs->r12;
    	vcpu->regs[VCPU_REGS_R13] = regs->r13;
    	vcpu->regs[VCPU_REGS_R14] = regs->r14;
    	vcpu->regs[VCPU_REGS_R15] = regs->r15;
    #endif
    
    	vcpu->rip = regs->rip;
    	kvm_x86_ops->set_rflags(vcpu, regs->rflags);
    
    	kvm_x86_ops->decache_regs(vcpu);
    
    	vcpu_put(vcpu);
    
    	return 0;
    }
    
    static void get_segment(struct kvm_vcpu *vcpu,
    			struct kvm_segment *var, int seg)
    {
    	return kvm_x86_ops->get_segment(vcpu, var, seg);
    }
    
    void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
    {
    	struct kvm_segment cs;
    
    	get_segment(vcpu, &cs, VCPU_SREG_CS);
    	*db = cs.db;
    	*l = cs.l;
    }
    EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
    
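/*
 * KVM_GET_SREGS: export the segment registers, descriptor tables,
 * control registers, EFER, APIC base and pending interrupt state.
 * With an in-kernel irqchip only the vector reported by
 * kvm_x86_ops->get_irq() is marked pending; otherwise the full
 * irq_pending bitmap is copied out.
 */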
    int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
    				  struct kvm_sregs *sregs)
    {
    	struct descriptor_table dt;
    	int pending_vec;
    
    	vcpu_load(vcpu);
    
    	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
    	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
    	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
    	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
    	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
    	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
    
    	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
    	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
    
    	kvm_x86_ops->get_idt(vcpu, &dt);
    	sregs->idt.limit = dt.limit;
    	sregs->idt.base = dt.base;
    	kvm_x86_ops->get_gdt(vcpu, &dt);
    	sregs->gdt.limit = dt.limit;
    	sregs->gdt.base = dt.base;
    
    	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
    	sregs->cr0 = vcpu->cr0;
    	sregs->cr2 = vcpu->cr2;
    	sregs->cr3 = vcpu->cr3;
    	sregs->cr4 = vcpu->cr4;
    	sregs->cr8 = get_cr8(vcpu);
    	sregs->efer = vcpu->shadow_efer;
    	sregs->apic_base = kvm_get_apic_base(vcpu);
    
    	if (irqchip_in_kernel(vcpu->kvm)) {
    		memset(sregs->interrupt_bitmap, 0,
    		       sizeof sregs->interrupt_bitmap);
    		pending_vec = kvm_x86_ops->get_irq(vcpu);
    		if (pending_vec >= 0)
    			set_bit(pending_vec,
    				(unsigned long *)sregs->interrupt_bitmap);
    	} else
    		memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
    		       sizeof sregs->interrupt_bitmap);
    
    	vcpu_put(vcpu);
    
    	return 0;
    }
    
    static void set_segment(struct kvm_vcpu *vcpu,
    			struct kvm_segment *var, int seg)
    {
    	return kvm_x86_ops->set_segment(vcpu, var, seg);
    }
    
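/*
 * KVM_SET_SREGS: the inverse of the above.  Changes to cr0, cr3, cr4
 * or EFER force a kvm_mmu_reset_context(), and the PDPTRs are reloaded
 * when the guest runs PAE paging without long mode.
 */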
    int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
    				  struct kvm_sregs *sregs)
    {
    	int mmu_reset_needed = 0;
    	int i, pending_vec, max_bits;
    	struct descriptor_table dt;
    
    	vcpu_load(vcpu);
    
    	dt.limit = sregs->idt.limit;
    	dt.base = sregs->idt.base;
    	kvm_x86_ops->set_idt(vcpu, &dt);
    	dt.limit = sregs->gdt.limit;
    	dt.base = sregs->gdt.base;
    	kvm_x86_ops->set_gdt(vcpu, &dt);
    
    	vcpu->cr2 = sregs->cr2;
    	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
    	vcpu->cr3 = sregs->cr3;
    
    	set_cr8(vcpu, sregs->cr8);
    
    	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
    #ifdef CONFIG_X86_64
    	kvm_x86_ops->set_efer(vcpu, sregs->efer);
    #endif
    	kvm_set_apic_base(vcpu, sregs->apic_base);
    
    	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
    
    	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
    	vcpu->cr0 = sregs->cr0;
    	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
    
    	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
    	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
    	if (!is_long_mode(vcpu) && is_pae(vcpu))
    		load_pdptrs(vcpu, vcpu->cr3);
    
    	if (mmu_reset_needed)
    		kvm_mmu_reset_context(vcpu);
    
    	if (!irqchip_in_kernel(vcpu->kvm)) {
    		memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
    		       sizeof vcpu->irq_pending);
    		vcpu->irq_summary = 0;
    		for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
    			if (vcpu->irq_pending[i])
    				__set_bit(i, &vcpu->irq_summary);
    	} else {
    		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
    		pending_vec = find_first_bit(
    			(const unsigned long *)sregs->interrupt_bitmap,
    			max_bits);
    		/* Only pending external irq is handled here */
    		if (pending_vec < max_bits) {
    			kvm_x86_ops->set_irq(vcpu, pending_vec);
    			pr_debug("Set back pending irq %d\n",
    				 pending_vec);
    		}
    	}
    
    	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
    	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
    	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
    	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
    	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
    	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
    
    	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
    	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
    
    	vcpu_put(vcpu);
    
    	return 0;
    }
    
    int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
    				    struct kvm_debug_guest *dbg)
    {
    	int r;
    
    	vcpu_load(vcpu);
    
    	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
    
    	vcpu_put(vcpu);
    
    	return r;
    }
    
    
    /*
     * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
     * we have asm/x86/processor.h
     */
    struct fxsave {
    	u16	cwd;
    	u16	swd;
    	u16	twd;
    	u16	fop;
    	u64	rip;
    	u64	rdp;
    	u32	mxcsr;
    	u32	mxcsr_mask;
	u32	st_space[32];	/* 8 regs, 16 bytes each = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16 regs, 16 bytes each = 256 bytes */
#else
	u32	xmm_space[32];	/* 8 regs, 16 bytes each = 128 bytes */
    #endif
    };
    
    
    /*
     * Translate a guest virtual address to a guest physical address.
     */
    int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
    				    struct kvm_translation *tr)
    {
    	unsigned long vaddr = tr->linear_address;
    	gpa_t gpa;
    
    	vcpu_load(vcpu);
    	mutex_lock(&vcpu->kvm->lock);
    	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
    	tr->physical_address = gpa;
    	tr->valid = gpa != UNMAPPED_GVA;
    	tr->writeable = 1;
    	tr->usermode = 0;
    	mutex_unlock(&vcpu->kvm->lock);
    	vcpu_put(vcpu);
    
    	return 0;
    }
    
    
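/*
 * KVM_GET_FPU/KVM_SET_FPU: convert between the guest's fxsave image
 * kept in the vcpu and the flat kvm_fpu layout exposed to userspace.
 */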
    int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
    {
    	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
    
    	vcpu_load(vcpu);
    
    	memcpy(fpu->fpr, fxsave->st_space, 128);
    	fpu->fcw = fxsave->cwd;
    	fpu->fsw = fxsave->swd;
    	fpu->ftwx = fxsave->twd;
    	fpu->last_opcode = fxsave->fop;
    	fpu->last_ip = fxsave->rip;
    	fpu->last_dp = fxsave->rdp;
    	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
    
    	vcpu_put(vcpu);
    
    	return 0;
    }
    
    int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
    {
    	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
    
    	vcpu_load(vcpu);
    
    	memcpy(fxsave->st_space, fpu->fpr, 128);
    	fxsave->cwd = fpu->fcw;
    	fxsave->swd = fpu->fsw;
    	fxsave->twd = fpu->ftwx;
    	fxsave->fop = fpu->last_opcode;
    	fxsave->rip = fpu->last_ip;
    	fxsave->rdp = fpu->last_dp;
    	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
    
    	vcpu_put(vcpu);
    
    	return 0;
    }
    
    void fx_init(struct kvm_vcpu *vcpu)
    {
    	unsigned after_mxcsr_mask;
    
    	/* Initialize guest FPU by resetting ours and saving into guest's */
    	preempt_disable();
    	fx_save(&vcpu->host_fx_image);
    	fpu_init();
    	fx_save(&vcpu->guest_fx_image);
    	fx_restore(&vcpu->host_fx_image);
    	preempt_enable();
    
    	vcpu->cr0 |= X86_CR0_ET;
    	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
    	vcpu->guest_fx_image.mxcsr = 0x1f80;
    	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
    	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
    }
    EXPORT_SYMBOL_GPL(fx_init);
    
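/*
 * Lazy FPU switching: kvm_load_guest_fpu() saves the host FPU state
 * and restores the guest image before entering the guest;
 * kvm_put_guest_fpu() restores the host state afterwards and counts a
 * fpu_reload.
 */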
    void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
    {
    	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
    		return;
    
    	vcpu->guest_fpu_loaded = 1;
    	fx_save(&vcpu->host_fx_image);
    	fx_restore(&vcpu->guest_fx_image);
    }
    EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
    
    void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
    {
    	if (!vcpu->guest_fpu_loaded)
    		return;
    
    	vcpu->guest_fpu_loaded = 0;
    	fx_save(&vcpu->guest_fx_image);
    	fx_restore(&vcpu->host_fx_image);
    
	++vcpu->stat.fpu_reload;
}
    EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
    
    
    void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
    {
    	kvm_x86_ops->vcpu_free(vcpu);
    }
    
    struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
    						unsigned int id)
    {
    
    	return kvm_x86_ops->vcpu_create(kvm, id);
    }
    
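/*
 * Final per-vcpu setup after creation: reset the vcpu and build its
 * MMU context; on failure the vcpu is freed again.
 */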
    int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
    {
    	int r;
    
    
    	/* We do fxsave: this must be aligned. */
    	BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
    
    	vcpu_load(vcpu);
    	r = kvm_arch_vcpu_reset(vcpu);
    	if (r == 0)
    		r = kvm_mmu_setup(vcpu);
    	vcpu_put(vcpu);
    	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
    	vcpu_load(vcpu);
    	kvm_mmu_unload(vcpu);
    	vcpu_put(vcpu);
    
    	kvm_x86_ops->vcpu_free(vcpu);
    }
    
    int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
    {
    	return kvm_x86_ops->vcpu_reset(vcpu);
    }
    
    void kvm_arch_hardware_enable(void *garbage)
    {
    	kvm_x86_ops->hardware_enable(garbage);
    }
    
    void kvm_arch_hardware_disable(void *garbage)
    {
    	kvm_x86_ops->hardware_disable(garbage);
    }
    
    int kvm_arch_hardware_setup(void)
    {
    	return kvm_x86_ops->hardware_setup();
    }
    
    void kvm_arch_hardware_unsetup(void)
    {
    	kvm_x86_ops->hardware_unsetup();
    }
    
    void kvm_arch_check_processor_compat(void *rtn)
    {
    	kvm_x86_ops->check_processor_compatibility(rtn);
    }
    
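/*
 * Common x86 vcpu initialization: choose the initial MP state (only
 * the BSP starts runnable when an in-kernel irqchip is used), allocate
 * the page backing vcpu->pio_data, create the MMU and, with an
 * in-kernel irqchip, the local APIC.
 */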
    int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
    {
    	struct page *page;
    	struct kvm *kvm;
    	int r;
    
    	BUG_ON(vcpu->kvm == NULL);
    	kvm = vcpu->kvm;
    
    	vcpu->mmu.root_hpa = INVALID_PAGE;
    	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
    		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
    	else
    		vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
    
    	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
    	if (!page) {
    		r = -ENOMEM;
    		goto fail;
    	}
    	vcpu->pio_data = page_address(page);
    
    	r = kvm_mmu_create(vcpu);
    	if (r < 0)
    		goto fail_free_pio_data;
    
    	if (irqchip_in_kernel(kvm)) {
    		r = kvm_create_lapic(vcpu);
    		if (r < 0)
    			goto fail_mmu_destroy;
    	}
    
    	return 0;
    
    fail_mmu_destroy:
    	kvm_mmu_destroy(vcpu);
    fail_free_pio_data:
    	free_page((unsigned long)vcpu->pio_data);
    fail:
    	return r;
    }
    
    void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
    {
    	kvm_free_lapic(vcpu);
    	kvm_mmu_destroy(vcpu);
    	free_page((unsigned long)vcpu->pio_data);
    }
    
    
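/*
 * Allocate the per-VM structure and initialize its active shadow page
 * list.
 */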
struct kvm *kvm_arch_create_vm(void)
    {
    	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
    
    	if (!kvm)
    		return ERR_PTR(-ENOMEM);
    
    	INIT_LIST_HEAD(&kvm->active_mmu_pages);
    
    	return kvm;
    }
    
    static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
    {
    	vcpu_load(vcpu);
    	kvm_mmu_unload(vcpu);
    	vcpu_put(vcpu);
    }
    
    static void kvm_free_vcpus(struct kvm *kvm)
    {
    	unsigned int i;
    
    	/*
    	 * Unpin any mmu pages first.
    	 */
    	for (i = 0; i < KVM_MAX_VCPUS; ++i)
    		if (kvm->vcpus[i])
    			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
    	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
    		if (kvm->vcpus[i]) {
    			kvm_arch_vcpu_free(kvm->vcpus[i]);
    			kvm->vcpus[i] = NULL;
    		}
    	}
    
    }
    
    void kvm_arch_destroy_vm(struct kvm *kvm)
    {
    	kfree(kvm->vpic);
    	kfree(kvm->vioapic);
    	kvm_free_vcpus(kvm);
    	kvm_free_physmem(kvm);
    	kfree(kvm);
    }
    
    
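/*
 * Arch hook for memory slot updates: for legacy !user_alloc slots it
 * mmaps (or munmaps) anonymous memory on behalf of userspace,
 * recomputes the shadow page limit unless userspace requested a fixed
 * number, then removes write access from the slot's shadow mappings
 * and flushes remote TLBs.
 */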
    int kvm_arch_set_memory_region(struct kvm *kvm,
    				struct kvm_userspace_memory_region *mem,
    				struct kvm_memory_slot old,
    				int user_alloc)
    {
    	int npages = mem->memory_size >> PAGE_SHIFT;
    	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
    
	/* To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
    	if (!user_alloc) {
    		if (npages && !old.rmap) {
    			down_write(&current->mm->mmap_sem);
    			memslot->userspace_addr = do_mmap(NULL, 0,
    						     npages * PAGE_SIZE,
    						     PROT_READ | PROT_WRITE,
    						     MAP_SHARED | MAP_ANONYMOUS,
    						     0);
    			up_write(&current->mm->mmap_sem);
    
    			if (IS_ERR((void *)memslot->userspace_addr))
    				return PTR_ERR((void *)memslot->userspace_addr);
    		} else {
    			if (!old.user_alloc && old.rmap) {
    				int ret;
    
    				down_write(&current->mm->mmap_sem);
    				ret = do_munmap(current->mm, old.userspace_addr,
    						old.npages * PAGE_SIZE);
    				up_write(&current->mm->mmap_sem);
    				if (ret < 0)
    					printk(KERN_WARNING
    				       "kvm_vm_ioctl_set_memory_region: "
    				       "failed to munmap memory\n");
    			}
    		}
    	}
    
    	if (!kvm->n_requested_mmu_pages) {
    		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
    		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
    	}
    
    	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
    	kvm_flush_remote_tlbs(kvm);
    
    	return 0;
    }