/*
 * arch/sh/mm/tlb-pteaex.c
 *
 * TLB operations for SH-X3 CPUs featuring PTE ASID Extensions.
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
|  | 12 | #include <linux/kernel.h> | 
|  | 13 | #include <linux/mm.h> | 
|  | 14 | #include <linux/io.h> | 
|  | 15 | #include <asm/system.h> | 
|  | 16 | #include <asm/mmu_context.h> | 
|  | 17 | #include <asm/cacheflush.h> | 
|  | 18 |  | 
/*
 * Prime the TLB with the freshly-established PTE for @address, avoiding a
 * subsequent hardware TLB miss.
 *
 * The MMIO write sequence (PTEH -> PTEAEX -> [PTEA] -> PTEL -> ldtlb) is
 * dictated by the hardware and must not be reordered; the whole load is
 * done with local interrupts disabled so the register set is not clobbered
 * mid-sequence.
 */
void update_mmu_cache(struct vm_area_struct * vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned long pteval;
	unsigned long vpn;

	/*
	 * Ptrace may call this routine for an mm other than the one
	 * currently active on this CPU; in that case there is no TLB to
	 * prime here, so bail out.
	 */
	if (vma && current->active_mm != vma->vm_mm)
		return;

#ifndef CONFIG_CACHE_OFF
	{
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			/*
			 * First time this page is mapped: write back any
			 * dirty cache lines through the P1 (cached) alias
			 * of its physical address, so the new mapping
			 * starts out coherent.  PG_mapped marks the page
			 * so this is done only once.
			 */
			if (!test_bit(PG_mapped, &page->flags)) {
				unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
				__flush_wback_region((void *)P1SEGADDR(phys),
						     PAGE_SIZE);
				__set_bit(PG_mapped, &page->flags);
			}
		}
	}
#endif

	local_irq_save(flags);

	/* Set PTEH register: the virtual page number being mapped. */
	vpn = address & MMU_VPN_MASK;
	__raw_writel(vpn, MMU_PTEH);

	/*
	 * Set PTEAEX: with PTE ASID extensions the ASID is written to its
	 * own register rather than packed into the low bits of PTEH.
	 */
	__raw_writel(get_asid(), MMU_PTEAEX);

	pteval = pte.pte_low;

	/* Set PTEA register */
#ifdef CONFIG_X2TLB
	/*
	 * For the extended mode TLB this is trivial, only the ESZ and
	 * EPR bits need to be written out to PTEA, with the remainder of
	 * the protection bits (with the exception of the compat-mode SZ
	 * and PR bits, which are cleared) being written out in PTEL.
	 */
	__raw_writel(pte.pte_high, MMU_PTEA);
#endif

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
#ifdef CONFIG_CACHE_WRITETHROUGH
	pteval |= _PAGE_WT;
#endif
	/* conveniently, we want all the software flags to be 0 anyway */
	__raw_writel(pteval, MMU_PTEL);

	/*
	 * Load the TLB entry from the PTEH/PTEAEX/PTEA/PTEL registers just
	 * programmed; the "memory" clobber keeps the compiler from moving
	 * memory accesses across the ldtlb.
	 */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}
|  | 81 |  | 
|  | 82 | /* | 
|  | 83 | * While SH-X2 extended TLB mode splits out the memory-mapped I/UTLB | 
|  | 84 | * data arrays, SH-X3 cores with PTEAEX split out the memory-mapped | 
|  | 85 | * address arrays. In compat mode the second array is inaccessible, while | 
|  | 86 | * in extended mode, the legacy 8-bit ASID field in address array 1 has | 
|  | 87 | * undefined behaviour. | 
|  | 88 | */ | 
|  | 89 | void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid, | 
|  | 90 | unsigned long page) | 
|  | 91 | { | 
|  | 92 | jump_to_uncached(); | 
|  | 93 | __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT); | 
|  | 94 | __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); | 
|  | 95 | back_to_cached(); | 
|  | 96 | } |