/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
#include "timing.h"

#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

struct id {
	unsigned long val;
	struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS, guestTID, guestPR) --> ID of physical CPU
 * guestAS	[0..1]
 * guestTID	[0..255]
 * guestPR	[0..1]
 * ID		[1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};
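
/*
 * For example, idt->id[1][32][0] holds the shadow ID assigned to guest
 * mappings with AS=1, TID=32, PR=0.  A val of 0 means no shadow ID has
 * been allocated yet; pentry points back into the owning core's
 * pcpu_id_table while the mapping is live there.
 */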

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of the vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps the last used shadow ID on the local core.
 * The valid range of a shadow ID is [1..255]. */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

static unsigned int tlb1_entry_num;

/*
 * Allocate a free shadow ID and set up a valid sid mapping in the given
 * entry.  A mapping is only valid when vcpu_id_table and pcpu_id_table
 * match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = ++(__get_cpu_var(pcpu_last_used_sid));
	if (sid < NUM_TIDS) {
		__get_cpu_var(pcpu_sids).entry[sid] = entry;
		entry->val = sid;
		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}

/*
 * Check whether the given entry contains a valid shadow ID mapping.
 * An ID mapping is considered valid only if
 * both the vcpu and the pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
		return entry->val;
	return -1;
}
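
/*
 * A mapping is stale if it was set up on a different core (pentry then
 * points into that core's pcpu_id_table), or if this core has since
 * flushed its sid space and handed the slot to someone else; either way
 * the cross-check against pcpu_sids above fails and -1 is returned.
 */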

/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
	__get_cpu_var(pcpu_last_used_sid) = 0;
	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
}

/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
			       struct kvmppc_vcpu_e500 *vcpu_e500,
			       int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map a guest (vcpu, AS, ID, PR) tuple to a physical core shadow id.
 * This function first looks up whether a valid mapping exists;
 * if not, it creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
					unsigned int as, unsigned int gid,
					unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}

/* Map guest pid to shadow.
 * We use PID to keep the shadow of the current guest's non-zero PID,
 * and use PID1 to keep the shadow of the guest's zero PID,
 * so that a guest tlbe with TID=0 can be accessed at any time. */
void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}
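
/*
 * Illustration: a guest running with PID=5 in AS=0, PR=0 gets
 * shadow_pid  = sid for (AS=0, TID=5, PR=0) and
 * shadow_pid1 = sid for (AS=0, TID=0, PR=0).
 * The hardware matches TLB entries against both PID and PID1, so
 * shadow entries for the guest's global (TID=0) mappings remain
 * usable while PID=5 is active.
 */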

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}

static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}
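
/*
 * Note that gtlb_nv[0] is a single round-robin way counter shared by
 * all TLB0 sets: successive victims simply cycle through ways
 * 0..KVM_E500_TLB0_WAY_NUM-1, whichever set is being filled.
 */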

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return tlb1_entry_num - tlbcam_index - 1;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}
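
/*
 * Worked example: a guest supervisor-only mapping with MAS3_SR|MAS3_SW
 * gains MAS3_UR|MAS3_UW from the shift above (each user bit sits one
 * bit left of its supervisor twin), since guest supervisor code runs
 * in user mode on the host.  The final OR keeps every entry accessible
 * to the host kernel itself.
 */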

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write the shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	asm volatile("isync; tlbwe" : : : "memory");
	local_irq_restore(flags);
}

static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel, struct tlbe *stlbe)
{
	if (tlbsel == 0) {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(0) |
				  MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(esel)));
	}
	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}

void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas3 = (pfn << PAGE_SHIFT) |
		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas7 = pfn >> (32 - PAGE_SHIFT);

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Shadow PID may be expired on local core */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
					 int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts, pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case (for TLB0) we do a local invalidation
		 * of the specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU,
		 * or if we're invalidating a TLB1 entry, we invalidate the
		 * entire shadow PID.
		 */
		if (tlbsel == 1 ||
		    (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a TLB0 entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search host TLB0 to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_size[tlbsel];
	int set_base;
	int i;

	if (tlbsel == 0) {
		int mask = size / KVM_E500_TLB0_WAY_NUM - 1;
		set_base = (eaddr >> PAGE_SHIFT) & mask;
		set_base *= KVM_E500_TLB0_WAY_NUM;
		size = KVM_E500_TLB0_WAY_NUM;
	} else {
		set_base = 0;
	}
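
	/*
	 * Illustrative numbers: with gtlb_size[0] == 256 and
	 * KVM_E500_TLB0_WAY_NUM == 2 there are 128 sets, so mask == 127;
	 * for eaddr == 0x10005000 that gives set_base == (5 * 2) == 10,
	 * and only the two ways gtlb_arch[0][10..11] are searched below.
	 */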

	for (i = 0; i < size; i++) {
		struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}

static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv,
					  struct tlbe *gtlbe,
					  pfn_t pfn)
{
	priv->pfn = pfn;
	priv->flags = E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		priv->flags |= E500_TLB_DIRTY;
}

static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
{
	if (priv->flags & E500_TLB_VALID) {
		if (priv->flags & E500_TLB_DIRTY)
			kvm_release_pfn_dirty(priv->pfn);
		else
			kvm_release_pfn_clean(priv->pfn);

		priv->flags = 0;
	}
}

static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}

/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
					   struct tlbe *gtlbe, int tsize,
					   struct tlbe_priv *priv,
					   u64 gvaddr, struct tlbe *stlbe)
{
	pfn_t pfn = priv->pfn;

	/* Force TS=1 IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
}

static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel,
	struct tlbe *stlbe)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn, hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;
	struct tlbe_priv *priv;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

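			/*
			 * Worked example (illustrative): a guest request for
			 * a 256K mapping (tsize == 8, tsize_pages == 64)
			 * where gfn and pfn are congruent only modulo 16
			 * fails the 256K check below, and the loop settles
			 * on 64K (tsize == 6, tsize_pages == 16), provided
			 * that range also stays within [start, end).
			 */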
			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			kvm_release_pfn_clean(pfn);
			return;
		}
	}

	/* Drop old priv and setup new one. */
	priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
	kvmppc_e500_priv_release(priv);
	kvmppc_e500_priv_setup(priv, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe);
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
				int esel, struct tlbe *stlbe)
{
	struct tlbe *gtlbe;

	gtlbe = &vcpu_e500->gtlb_arch[0][esel];

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, esel, stlbe);

	return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[1]++;

	if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->gtlb_nv[1] = 0;

	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe);

	return victim;
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Recalc shadow pid since MSR changes */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	gtlbe->mas1 = 0;

	return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

/* sesel is index into the set, not the whole array */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct tlbe *gtlbe,
			struct tlbe *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe),
				   get_cur_pr(&vcpu_e500->vcpu), 0);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}
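
/*
 * Callers (the tlbwe emulation and kvmppc_mmu_map) build the shadow
 * entry without a TID; this helper looks up or allocates the shadow ID
 * with preemption disabled so the sid cannot be invalidated between
 * allocation and the TLB write.
 */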

int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe;
	int tlbsel, esel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	if (get_tlb_v(gtlbe))
		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		struct tlbe stlbe;
		int stlbsel, sesel;
		u64 eaddr;
		u64 raddr;

		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);

			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe, &stlbe);
			break;

		default:
			BUG();
		}

		write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct tlbe *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel;
		priv = &vcpu_e500->gtlb_priv[stlbsel][sesel];

		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
					priv, eaddr, &stlbe);
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
					     gtlbe, &stlbe);
		break;
	}

	default:
		BUG();
		break;
	}

	write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
		gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->gtlb_arch[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->gtlb_arch[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_arch[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[0] == NULL)
		goto err_out;

	vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_arch[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[1] == NULL)
		goto err_out_guest0;

	vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *)
		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_priv[0] == NULL)
		goto err_out_guest1;
	vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *)
		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL);

	if (vcpu_e500->gtlb_priv[1] == NULL)
		goto err_out_priv0;

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
		goto err_out_priv1;

	/*
	 * Init TLB configuration registers: the low 12 bits of TLBnCFG
	 * hold NENTRY, which we replace with the guest-visible TLB sizes.
	 */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];

	return 0;

err_out_priv1:
	kfree(vcpu_e500->gtlb_priv[1]);
err_out_priv0:
	kfree(vcpu_e500->gtlb_priv[0]);
err_out_guest1:
	kfree(vcpu_e500->gtlb_arch[1]);
err_out_guest0:
	kfree(vcpu_e500->gtlb_arch[0]);
err_out:
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel, i;

	/* release all privs */
	for (stlbsel = 0; stlbsel < 2; stlbsel++)
		for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
			struct tlbe_priv *priv =
				&vcpu_e500->gtlb_priv[stlbsel][i];
			kvmppc_e500_priv_release(priv);
		}

	kvmppc_e500_id_table_free(vcpu_e500);
	kfree(vcpu_e500->gtlb_arch[1]);
	kfree(vcpu_e500->gtlb_arch[0]);
}