#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>

/* Reclaim shadow pages when the free pool drops below the minimum. */
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
		__kvm_mmu_free_some_pages(vcpu);
}

/* Reload the MMU only if the shadow root has been invalidated. */
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

/* Guest paging-mode queries based on the shadowed control registers. */
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.shadow_efer & EFER_LME;
#else
	return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr4 & X86_CR4_PAE;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr4 & X86_CR4_PSE;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_PG;
}

#endif /* __KVM_X86_MMU_H */