/*
 * Userland implementation of gettimeofday() for 32-bit processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
 *                    IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

/* Offset for the low 32-bit part of a field of long type */
#ifdef CONFIG_PPC64
#define LOPART	4
#else
#define LOPART	0
#endif
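/* (On 64-bit PPC a long is 64 bits and, the CPU being big-endian, its
 * low 32-bit word sits at byte offset 4; on 32-bit PPC a long is a
 * single word at offset 0.)
 */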

	.text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
	.cfi_startproc
	mflr	r12
	.cfi_register lr,r12

	mr	r10,r3			/* r10 saves tv */
	mr	r11,r4			/* r11 saves tz */
	bl	__get_datapage@local	/* get data page */
	mr	r9, r3			/* datapage ptr in r9 */
	cmplwi	r10,0			/* check if tv is NULL */
	beq	3f
	bl	__do_get_xsec@local	/* get xsec from tb & kernel */
	bne-	2f			/* out of line -> do syscall */

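	/* __do_get_xsec returns the time as a 64-bit fixed-point value in
	 * r3 (high) and r4 (low), in units of 2^-20 seconds ("xsec") since
	 * the epoch; the conversions below follow from that format.
	 */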
	/* seconds are xsec >> 20 */
	rlwinm	r5,r4,12,20,31
	rlwimi	r5,r3,12,0,19
	stw	r5,TVAL32_TV_SEC(r10)

	/* Get the remaining xsec and convert it to usec. We scale
	 * up the remaining xsec by 12 bits and take the top 32 bits
	 * of the multiplication.
	 */
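	/* In effect usec = (xsec & 0xfffff) * 1000000 >> 20: the fraction is
	 * pre-shifted by 12 bits so that the high word of the 32x32 product
	 * computed by mulhwu is exactly that quotient, always below 1000000.
	 */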
	rlwinm	r5,r4,12,0,19
	lis	r6,1000000@h
	ori	r6,r6,1000000@l
	mulhwu	r5,r5,r6
	stw	r5,TVAL32_TV_USEC(r10)

3:	cmplwi	r11,0			/* check if tz is NULL */
	beq	1f
	lwz	r4,CFG_TZ_MINUTEWEST(r9)/* fill tz */
	lwz	r5,CFG_TZ_DSTTIME(r9)
	stw	r4,TZONE_TZ_MINWEST(r11)
	stw	r5,TZONE_TZ_DSTTIME(r11)

1:	mtlr	r12
	crclr	cr0*4+so
	li	r3,0
	blr

2:
	mtlr	r12
	mr	r3,r10
	mr	r4,r11
	li	r0,__NR_gettimeofday
	sc
	blr
	.cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)

/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
	.cfi_startproc
	/* Check for supported clock IDs */
	cmpli	cr0,r3,CLOCK_REALTIME
	cmpli	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f

	mflr	r12			/* r12 saves lr */
	.cfi_register lr,r12
	mr	r11,r4			/* r11 saves tp */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */

50:	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
	bne	cr1,80f			/* not monotonic -> all done */

	/*
	 * CLOCK_MONOTONIC
	 */

	/* Now we must fix up the result using the wall-to-monotonic offset.
	 * We need to snapshot that value and do the counter trick again.
	 * Fortunately, we still have the counter value in r8 that was
	 * returned by __do_get_tspec.  At this point, r3,r4 contain our
	 * sec/nsec values, r5 and r6 can be used, and r7 contains
	 * NSEC_PER_SEC.
	 */
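	/* The kernel maintains wall_to_monotonic so that
	 * monotonic time = wall-clock time + wall_to_monotonic,
	 * which is exactly the addition performed below.
	 */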

	lwz	r5,WTOM_CLOCK_SEC(r9)
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* We now have our offset in r5,r6. We create a fake dependency
	 * on that value and re-check the counter
	 */
	or	r0,r6,r5
	xor	r0,r0,r0
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmpl	cr0,r8,r0		/* check if updated */
	bne-	50b

	/* Calculate and store the result. Note that this mimics the C code,
	 * which may produce odd results if nsec goes negative... is that
	 * possible at all?
	 */
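	/* Roughly, in C:
	 *	sec += wtom_sec;  nsec += wtom_nsec;
	 *	if (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
	 *	else if (nsec < 0)        { nsec += NSEC_PER_SEC; sec--; }
	 */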
	add	r3,r3,r5
	add	r4,r4,r6
	cmpw	cr0,r4,r7
	cmpwi	cr1,r4,0
	blt	1f
	subf	r4,r7,r4
	addi	r3,r3,1
1:	bge	cr1,80f
	addi	r3,r3,-1
	add	r4,r4,r7

80:	stw	r3,TSPC32_TV_SEC(r11)
	stw	r4,TSPC32_TV_NSEC(r11)

	mtlr	r12
	crclr	cr0*4+so
	li	r3,0
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_gettime
	sc
	blr
	.cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)


/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
	.cfi_startproc
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f

	li	r3,0
	cmpli	cr0,r4,0
	crclr	cr0*4+so
	beqlr
	lis	r5,CLOCK_REALTIME_RES@h
	ori	r5,r5,CLOCK_REALTIME_RES@l
	stw	r3,TSPC32_TV_SEC(r4)
	stw	r5,TSPC32_TV_NSEC(r4)
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc
	blr
	.cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)


/*
 * This is the core of gettimeofday() & friends. It returns the xsec
 * value in r3 & r4 and expects the datapage ptr (not clobbered)
 * in r9. It clobbers r0,r4,r5,r6,r7,r8.
 * When returning, r8 contains the counter value that can be reused
 * by the monotonic clock implementation.
 */
__do_get_xsec:
	.cfi_startproc
	/* Check for update count & load values. We use the low
	 * order 32 bits of the update count
	 */
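	/* The update count acts like a seqcount: the kernel makes it odd
	 * while the vDSO data is being updated, so an odd value, or a value
	 * that changes before we are done reading, means we must retry.
	 */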
1:	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency */
	add	r9,r9,r0
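	/* (r0 is always zero here, but deriving it from r8 makes the loads
	 * below data-dependent on the counter read, so they cannot be
	 * satisfied ahead of it without needing an explicit barrier.)
	 */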

	/* Load orig stamp (offset to TB) */
	lwz	r5,CFG_TB_ORIG_STAMP(r9)
	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)

	/* Get a stable TB value */
2:	mftbu	r3
	mftbl	r4
	mftbu	r0
	cmpl	cr0,r3,r0
	bne-	2b

	/* Subtract tb orig stamp. If the high part is non-zero, we jump to
	 * the slow path which calls the syscall.
	 * If it's ok, then we have our 32-bit tb_ticks value in r7.
	 */
	subfc	r7,r6,r4
	subfe.	r0,r5,r3
	bne-	3f

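	/* tb_to_xs is the timebase-to-xsec conversion factor, a binary
	 * fraction scaled by 2^64, so the value computed below is
	 * effectively (tb_ticks * tb_to_xs) >> 64, which fits in 32 bits
	 * since tb_ticks does; the carry out of the discarded low part is
	 * kept in XER:CA and added in together with the stamp.
	 */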
	/* Load scale factor & do multiplication */
	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
	lwz	r6,(CFG_TB_TO_XS+4)(r9)
	mulhwu	r4,r7,r5
	mulhwu	r6,r7,r6
	mullw	r0,r7,r5
	addc	r6,r6,r0

	/* At this point, we have the scaled xsec value in r4 + XER:CA
	 * we load & add the stamp since epoch
	 */
	lwz	r5,CFG_STAMP_XSEC(r9)
	lwz	r6,(CFG_STAMP_XSEC+4)(r9)
	adde	r4,r4,r6
	addze	r3,r5

	/* We now have our result in r3,r4. We create a fake dependency
	 * on that result and re-check the counter
	 */
	or	r6,r4,r3
	xor	r0,r6,r6
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmpl	cr0,r8,r0		/* check if updated */
	bne-	1b

	/* Warning! The caller expects CR:EQ to be set to indicate a
	 * successful calculation (so it won't fall back to the syscall
	 * method). We have overridden that CR bit in the counter check,
	 * but fortunately, the loop exit condition _is_ CR:EQ set, so
	 * we can exit safely here. If you change this code, be careful
	 * of that side effect.
	 */
3:	blr
	.cfi_endproc

/*
 * This is the core of clock_gettime(). It returns the current
 * time in seconds and nanoseconds in r3 and r4.
 * It expects the datapage ptr in r9 and doesn't clobber it.
 * It clobbers r0, r5, r6, r10 and returns NSEC_PER_SEC in r7.
 * On return, r8 contains the counter value that can be reused.
 * This clobbers cr0 but not any other cr field.
 */
__do_get_tspec:
	.cfi_startproc
	/* Check for update count & load values. We use the low
	 * order 32 bits of the update count
	 */
1:	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency */
	add	r9,r9,r0

	/* Load orig stamp (offset to TB) */
	lwz	r5,CFG_TB_ORIG_STAMP(r9)
	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)

	/* Get a stable TB value */
2:	mftbu	r3
	mftbl	r4
	mftbu	r0
	cmpl	cr0,r3,r0
	bne-	2b

	/* Subtract tb orig stamp and shift left 12 bits. */
	subfc	r7,r6,r4
	subfe	r0,r5,r3
	slwi	r0,r0,12
	rlwimi.	r0,r7,12,20,31
	slwi	r7,r7,12

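	/* With the delta pre-shifted by 12 bits, multiplying by tb_to_xs
	 * (a 2^64-scaled conversion to 2^-20 second units) gives the
	 * elapsed time in 2^-32 second units: whole seconds in the high
	 * word, a 32-bit binary fraction of a second in the low word.
	 */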
	/* Load scale factor & do multiplication */
	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
	lwz	r6,(CFG_TB_TO_XS+4)(r9)
	mulhwu	r3,r7,r6
	mullw	r10,r7,r5
	mulhwu	r4,r7,r5
	addc	r10,r3,r10
	li	r3,0

	beq+	4f			/* skip high part computation if 0 */
	mulhwu	r3,r0,r5
	mullw	r7,r0,r5
	mulhwu	r5,r0,r6
	mullw	r6,r0,r6
	adde	r4,r4,r7
	addze	r3,r3
	addc	r4,r4,r5
	addze	r3,r3
	addc	r10,r10,r6

4:	addze	r4,r4			/* add in carry */
	lis	r7,NSEC_PER_SEC@h
	ori	r7,r7,NSEC_PER_SEC@l
	mulhwu	r4,r4,r7		/* convert to nanoseconds */
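	/* i.e. nsec = fraction_of_second * NSEC_PER_SEC >> 32 */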

	/* At this point, we have seconds & nanoseconds since the xtime
	 * stamp in r3+CA and r4.  Load & add the xtime stamp.
	 */
#ifdef CONFIG_PPC64
	lwz	r5,STAMP_XTIME+TSPC64_TV_SEC+LOPART(r9)
	lwz	r6,STAMP_XTIME+TSPC64_TV_NSEC+LOPART(r9)
#else
	lwz	r5,STAMP_XTIME+TSPC32_TV_SEC(r9)
	lwz	r6,STAMP_XTIME+TSPC32_TV_NSEC(r9)
#endif
	add	r4,r4,r6
	adde	r3,r3,r5

	/* We now have our result in r3,r4. We create a fake dependency
	 * on that result and re-check the counter
	 */
	or	r6,r4,r3
	xor	r0,r6,r6
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmpl	cr0,r8,r0		/* check if updated */
	bne-	1b

	/* check for nanosecond overflow and adjust if necessary */
	cmpw	r4,r7
	bltlr				/* all done if no overflow */
	subf	r4,r7,r4		/* adjust if overflow */
	addi	r3,r3,1

	blr
	.cfi_endproc