/*
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

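/*
 * The SH-4 data cache is indexed by virtual-address bits above the page
 * offset, so two virtual mappings of the same physical page can land in
 * different cache lines ("aliases").  CACHE_ALIAS masks exactly those
 * colour-selecting bits; the routines below use it to keep kernel-side
 * and user-side views of a page coherent.
 */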
#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)

#define kmap_get_fixmap_pte(vaddr)                                     \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

static pte_t *kmap_coherent_pte;

void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}

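/*
 * kmap_coherent() maps @page at the fixmap slot whose cache colour
 * matches the user address @addr, so kernel-side accesses hit the same
 * cache lines as the user mapping and no aliased copies are left
 * behind.  Preemption is disabled for the lifetime of the mapping, and
 * any stale TLB entry for the slot is shot down before it is reused.
 */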
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags;
	pte_t pte;

	inc_preempt_count();

	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);

	local_irq_save(flags);
	flush_tlb_one(get_asid(), vaddr);
	local_irq_restore(flags);

	update_mmu_cache(NULL, vaddr, pte);

	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);

	return (void *)vaddr;
}

static inline void kunmap_coherent(struct page *page)
{
	dec_preempt_count();
	preempt_check_resched();
}

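/*
 * Typical use of the pair within this file (a sketch of the local
 * pattern, not a public API):
 *
 *	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 *	memcpy(vto, src, len);
 *	kunmap_coherent(vto);
 *
 * kunmap_coherent() ignores its argument and only drops the preempt
 * count taken in kmap_coherent().
 */
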
/*
 * clear_user_page
 * @to: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 *
 * The kernel-side copy at @to is only written back when @to and
 * @address fall in different dcache alias colours, i.e. when the user
 * mapping would otherwise not see the freshly cleared lines.
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	__set_bit(PG_mapped, &page->flags);

	clear_page(to);
	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
		__flush_wback_region(to, PAGE_SIZE);
}

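/*
 * copy_to_user_page() is the arch hook used when the kernel writes into
 * a page that is (or may be) mapped into userspace at @vaddr, e.g. via
 * access_process_vm()/ptrace (example callers, not an exhaustive list).
 * Instead of writing through @dst and flushing afterwards, this version
 * makes its own same-colour mapping, which is why @dst goes unused.
 */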
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	void *vto;

	__set_bit(PG_mapped, &page->flags);

	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(vto, src, len);
	kunmap_coherent(vto);

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

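/*
 * copy_from_user_page() is the read-side counterpart: the kernel reads
 * the user-mapped page through a same-colour mapping, so it observes
 * exactly the cache lines the user mapping dirtied and no extra flush
 * is required.  @dst is the kernel destination buffer; @src goes unused
 * by this implementation.
 */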
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	void *vfrom;

	__set_bit(PG_mapped, &page->flags);

	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(dst, vfrom, len);
	kunmap_coherent(vfrom);
}

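/*
 * copy_user_highpage() copies @from into @to for a userspace mapping at
 * @vaddr, typically on the copy-on-write fault path (other generic
 * callers may exist).  The source is read through a coherent,
 * same-colour mapping; the destination is written via kmap_atomic() and
 * written back if its kernel address aliases differently from @vaddr.
 */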
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	__set_bit(PG_mapped, &to->flags);

	vto = kmap_atomic(to, KM_USER1);
	vfrom = kmap_coherent(from, vaddr);
	copy_page(vto, vfrom);
	kunmap_coherent(vfrom);

	if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is up to date on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * For SH-4, we have our own implementation for ptep_get_and_clear:
 * when a present PTE is torn down and the page is no longer writably
 * mapped through its address_space, drop PG_mapped again so that the
 * alias handling is re-armed the next time the page is mapped into
 * userspace.
 */
pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}