[PATCH] KVM: MMU: OOM handling

When beginning to process a page fault, make sure we have enough shadow
pages available to service the fault; if not, free some pages first.
This guarantees that kvm_mmu_get_page() can no longer fail, so the
-ENOMEM check after it and the oom retry loop around the shadow pte
fetch path become dead code and are removed.
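
For illustration, the pre-fault check amounts to something like the
sketch below.  The helper and field names (kvm_mmu_free_some_pages,
n_free_mmu_pages, KVM_REFILL_PAGES, active_mmu_pages) are assumptions
for this sketch, not a quote of the full patch:

	static void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
	{
		/*
		 * Called at the start of page fault processing, before
		 * the shadow page table walk: zap active shadow pages
		 * until a small reserve of free pages is available, so
		 * that later kvm_mmu_get_page() calls cannot fail.
		 */
		while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
			struct kvm_mmu_page *page;

			/* take a victim from the tail of the active list */
			page = container_of(vcpu->kvm->active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(vcpu, page);
		}
	}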

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index cf4b74c..03c474a 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -246,8 +246,6 @@
 		}
 		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
 					       metaphysical, shadow_ent);
-		if (!shadow_page)
-			return ERR_PTR(-ENOMEM);
 		shadow_addr = shadow_page->page_hpa;
 		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
 			| PT_WRITABLE_MASK | PT_USER_MASK;
@@ -347,17 +345,8 @@
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
-	for (;;) {
-		FNAME(walk_addr)(&walker, vcpu, addr);
-		shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
-		if (IS_ERR(shadow_pte)) {  /* must be -ENOMEM */
-			printk("%s: oom\n", __FUNCTION__);
-			nonpaging_flush(vcpu);
-			FNAME(release_walker)(&walker);
-			continue;
-		}
-		break;
-	}
+	FNAME(walk_addr)(&walker, vcpu, addr);
+	shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
 
 	/*
 	 * The page is not mapped by the guest.  Let the guest handle it.