/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

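	/* Worked example for the 256MB segments used here, e.g.:
	 *   EA  = 0xC000000002345678
	 *   r9  = EA >> 60 = 0xc		(region; 0xc is PAGE_OFFSET)
	 *   r10 = EA >> 28 = 0xc00000000	(ESID)
	 */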
	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */
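	/* (Any EA at or above 0xFFFFFFFFF0000000 lands in that top
	 * segment, since 0xFFFFFFFFF0000000 >> 28 = 0xfffffffff.)
	 */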

	/* Check whether we hit the linear mapping or the vmalloc/ioremap
	 * kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits; the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
	b	slb_finish_load
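	/* The zero immediate above is only a placeholder: at boot,
	 * slb_initialize() (in slb.c) rewrites it with the real
	 * SLB_VSID_* encoding for the linear mapping, and does the
	 * same for the other slb_miss_*_load_* sites below.
	 */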

1:	/* vmalloc/ioremap mapping encoding bits; the "li" instructions below
	 * will be patched by the kernel at boot
	 */
BEGIN_FTR_SECTION
	/* check whether this is in vmalloc or ioremap space */
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	slb_finish_load
5:
END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
_GLOBAL(slb_miss_kernel_load_io)
	li	r11,0
	b	slb_finish_load
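	/* The vmalloc-vs-ioremap check above is only live when
	 * CPU_FTR_CI_LARGE_PAGE is clear, i.e. when cache-inhibited
	 * (ioremap) mappings may need a different page size from
	 * vmalloc; otherwise the feature fixup nops it out and both
	 * use the encoding patched in at slb_miss_kernel_load_io.
	 */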


0:	/* user address: proto-VSID = context << USER_ESID_BITS | ESID.
	 * First check if the address is within the boundaries of the
	 * user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */

	/* Figure out if the segment contains huge pages */
#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
	b	1f
END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
	cmpldi	r10,16

	lhz	r9,PACALOWHTLBAREAS(r13)
	mr	r11,r10
	blt	5f

	lhz	r9,PACAHIGHHTLBAREAS(r13)
	srdi	r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)

5:	srd	r9,r9,r11
	andi.	r9,r9,1
	beq	1f
_GLOBAL(slb_miss_user_load_huge)
	li	r11,0
	b	2f
1:
#endif /* CONFIG_HUGETLB_PAGE */
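	/* The two PACA halfwords tested above are bitmaps of 256MB
	 * segments reserved for huge pages (low: ESID < 16, indexed
	 * by ESID; high: indexed by EA >> HTLB_AREA_SHIFT).  A set
	 * bit routes the segment through the boot-time-patched flags
	 * at slb_miss_user_load_huge.
	 */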

	lhz	r11,PACACONTEXTSLLP(r13)
2:
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0
	b	slb_finish_load
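	/* The rldimi above inserts the context number above the low
	 * USER_ESID_BITS of the ESID, i.e.
	 *   r10 = (context << USER_ESID_BITS) | esid
	 * which is the user proto-VSID fed to the scramble below.
	 */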

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the page tables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through to slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	ASM_VSID_SCRAMBLE(r10,r9)
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
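	/* ASM_VSID_SCRAMBLE is, in effect,
	 *   vsid = (proto_vsid * VSID_MULTIPLIER) % VSID_MODULUS
	 * a multiplicative hash that spreads VSIDs evenly over the
	 * hash table; the rldimi above then packs VSID and flags into
	 * the "VSID data" word that slbmte expects in RS.
	 */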

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r3,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r3
	beq	3f
#endif /* CONFIG_PPC_ISERIES */

	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* use a cpu feature mask if we ever change our slb size */
	cmpldi	r10,SLB_NUM_ENTRIES

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)
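	/* The round-robin pointer walks slots SLB_NUM_BOLTED ..
	 * SLB_NUM_ENTRIES-1 and wraps back to SLB_NUM_BOLTED, so the
	 * bolted entries in slots 0 .. SLB_NUM_BOLTED-1 are never
	 * chosen as victims.
	 */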

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r10 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7
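	/* Kernel SLB entries are never cast out at context switch, so
	 * only user entries need to be recorded in the cache below.
	 */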

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:	/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
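	/* switch_slb() uses this cache to invalidate just the user
	 * entries added since the last switch; once the cache has
	 * overflowed (ptr > SLB_CACHE_ENTRIES) it falls back to
	 * flushing the whole SLB.
	 */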
	crclr	4*cr0+eq		/* set result to "success" */
	blr