/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 1999-2012, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * $Id: sbutils.c 310902 2012-01-26 19:45:33Z $
 */

#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include <sbpcmcia.h>

#include "siutils_priv.h"


/* local prototypes */
static uint _sb_coreidx(si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
                     uint ncores);
static uint32 _sb_coresba(si_info_t *sii);
static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);

#define	SET_SBREG(sii, r, mask, val)	\
		W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
#define	REGS2SB(va)	(sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
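
/*
 * Note: REGS2SB derives a core's sbconfig_t view from its mapped register
 * base; the Sonics configuration registers sit at SBCONFIGOFF within each
 * core's register window. SET_SBREG is a read-modify-write helper built on
 * the R_SBREG/W_SBREG accessors defined below.
 */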

/* sonicsrev */
#define	SONICS_2_2	(SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define	SONICS_2_3	(SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

#define	R_SBREG(sii, sbr)	sb_read_sbreg((sii), (sbr))
#define	W_SBREG(sii, sbr, v)	sb_write_sbreg((sii), (sbr), (v))
#define	AND_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define	OR_SBREG(sii, sbr, v)	W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
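
/*
 * Backplane (sbconfig) register accesses in this file go through
 * R_SBREG/W_SBREG rather than raw R_REG/W_REG so that the PCMCIA-specific
 * attribute-window handling and 16-bit split writes implemented in
 * sb_read_sbreg/sb_write_sbreg are applied consistently.
 */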

static uint32
sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
	uint8 tmp;
	uint32 val, intr_val = 0;


	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11));	/* mask out bit 11 */
	}

	val = R_REG(sii->osh, sbr);

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}

	return (val);
}

static void
sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
	uint8 tmp;
	volatile uint32 dummy;
	uint32 intr_val = 0;


	/*
	 * Compact flash only has an 11-bit address, while we need a 12-bit address.
	 * MEM_SEG will be OR'd with the other 11 address bits in hardware,
	 * so we program MEM_SEG with the 12th bit when necessary (accessing sb registers).
	 * For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
	 */
	if (PCMCIA(sii)) {
		INTR_OFF(sii, intr_val);
		tmp = 1;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11));	/* mask out bit 11 */
	}

	if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
		dummy = R_REG(sii->osh, sbr);
		BCM_REFERENCE(dummy);
		W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
	} else
		W_REG(sii->osh, sbr, v);

	if (PCMCIA(sii)) {
		tmp = 0;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
		INTR_RESTORE(sii, intr_val);
	}
}

uint
sb_coreid(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
}

uint
sb_intflag(si_t *sih)
{
	si_info_t *sii;
	void *corereg;
	sbconfig_t *sb;
	uint origidx, intflag, intr_val = 0;

	sii = SI_INFO(sih);

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);
	corereg = si_setcore(sih, CC_CORE_ID, 0);
	ASSERT(corereg != NULL);
	sb = REGS2SB(corereg);
	intflag = R_SBREG(sii, &sb->sbflagst);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);

	return intflag;
}

uint
sb_flag(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
}

void
sb_setint(si_t *sih, int siflag)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 vec;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	if (siflag == -1)
		vec = 0;
	else
		vec = 1 << siflag;
	W_SBREG(sii, &sb->sbintvec, vec);
}

/* return core index of the core with address 'sba' */
static uint
_sb_coreidx(si_info_t *sii, uint32 sba)
{
	uint i;

	for (i = 0; i < sii->numcores; i ++)
		if (sba == sii->coresba[i])
			return i;
	return BADIDX;
}

/* return core address of the current core */
static uint32
_sb_coresba(si_info_t *sii)
{
	uint32 sbaddr;


	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS: {
		sbconfig_t *sb = REGS2SB(sii->curmap);
		sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
		break;
	}

	case PCI_BUS:
		sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
		break;

	case PCMCIA_BUS: {
		uint8 tmp = 0;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		sbaddr  = (uint32)tmp << 12;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		sbaddr |= (uint32)tmp << 16;
		OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		sbaddr |= (uint32)tmp << 24;
		break;
	}

	case SPI_BUS:
	case SDIO_BUS:
		sbaddr = (uint32)(uintptr)sii->curmap;
		break;


	default:
		sbaddr = BADCOREADDR;
		break;
	}

	return sbaddr;
}

uint
sb_corevendor(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
}

uint
sb_corerev(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint sbidh;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);
	sbidh = R_SBREG(sii, &sb->sbidhigh);

	return (SBCOREREV(sbidh));
}

/* set core-specific control flags */
void
sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
	        (val << SBTML_SICF_SHIFT);
	W_SBREG(sii, &sb->sbtmstatelow, w);
}

/* set/clear core-specific control flags */
uint32
sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
		        (val << SBTML_SICF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatelow, w);
	}

	/* Return the new value.
	 * For a write operation, the following readback ensures the write has completed.
	 */
	return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
}

/* set/clear core-specific status flags */
uint32
sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
	si_info_t *sii;
	sbconfig_t *sb;
	uint32 w;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	ASSERT((val & ~mask) == 0);
	ASSERT((mask & ~SISF_CORE_BITS) == 0);

	/* mask and set */
	if (mask || val) {
		w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
		        (val << SBTMH_SISF_SHIFT);
		W_SBREG(sii, &sb->sbtmstatehigh, w);
	}

	/* return the new value */
	return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
}

bool
sb_iscoreup(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	return ((R_SBREG(sii, &sb->sbtmstatelow) &
	         (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
	        (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32-bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
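/*
 * Illustrative use (not from this file): a caller could program the
 * chipcommon watchdog through this helper without disturbing the current
 * core selection, e.g.
 *	sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
 * where 'ticks' is the caller's countdown value.
 */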
uint
sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;
	si_info_t *sii;

	sii = SI_INFO(sih);

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if it does not exist */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
			                            SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
	} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13, at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (uint32 *)((char *)sii->curmap +
				               PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (uint32 *)((char *)sii->curmap +
				               ((regoff >= SBCONFIGOFF) ?
				                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
				               regoff);
		}
	}

	if (!fast) {
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		if (regoff >= SBCONFIGOFF) {
			w = (R_SBREG(sii, r) & ~mask) | val;
			W_SBREG(sii, r, w);
		} else {
			w = (R_REG(sii->osh, r) & ~mask) | val;
			W_REG(sii->osh, r, w);
		}
	}

	/* readback */
	if (regoff >= SBCONFIGOFF)
		w = R_SBREG(sii, r);
	else {
		if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
		    (coreidx == SI_CC_IDX) &&
		    (regoff == OFFSETOF(chipcregs_t, watchdog))) {
			w = val;
		} else
			w = R_REG(sii->osh, r);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			sb_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}

/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'ncores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 */
#define SB_MAXBUSES	2
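/* Only two bus levels are scanned: the chip's primary backplane plus at most
 * one bridged bus reached through an OCP bridge core (see the OCP_CORE_ID
 * handling below).
 */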
static uint
_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
{
	uint next;
	uint ncc = 0;
	uint i;

	if (bus >= SB_MAXBUSES) {
		SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
		return 0;
	}
	SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

	/* Scan all cores on the bus starting from core 0.
	 * Core addresses must be contiguous on each bus.
	 */
	for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
		sii->coresba[next] = sbba + (i * SI_CORE_SIZE);

		/* keep and reuse the initial register mapping */
		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (sii->coresba[next] == sba)) {
			SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
			sii->regs[next] = regs;
		}

		/* change core to 'next' and read its coreid */
		sii->curmap = _sb_setcoreidx(sii, next);
		sii->curidx = next;

		sii->coreid[next] = sb_coreid(&sii->pub);

		/* core specific processing... */
		/* chipc provides # cores */
		if (sii->coreid[next] == CC_CORE_ID) {
			chipcregs_t *cc = (chipcregs_t *)sii->curmap;
			uint32 ccrev = sb_corerev(&sii->pub);

			/* determine numcores - this is the total # cores in the chip */
			if (((ccrev == 4) || (ccrev >= 6)))
				numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
				        CID_CC_SHIFT;
			else {
				/* Older chips */
				uint chip = CHIPID(sii->pub.chip);

				if (chip == BCM4306_CHIP_ID)	/* < 4306c0 */
					numcores = 6;
				else if (chip == BCM4704_CHIP_ID)
					numcores = 9;
				else if (chip == BCM5365_CHIP_ID)
					numcores = 7;
				else {
					SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
					          chip));
					ASSERT(0);
					numcores = 1;
				}
			}
			SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
			         sii->pub.issim ? "QT" : ""));
		}
		/* scan bridged SB(s) and add results to the end of the list */
		else if (sii->coreid[next] == OCP_CORE_ID) {
			sbconfig_t *sb = REGS2SB(sii->curmap);
			uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
			uint nsbcc;

			sii->numcores = next + 1;

			if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
				continue;
			nsbba &= 0xfffff000;
			if (_sb_coreidx(sii, nsbba) != BADIDX)
				continue;

			nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
			nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
			if (sbba == SI_ENUM_BASE)
				numcores -= nsbcc;
			ncc += nsbcc;
		}
	}

	SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

	sii->numcores = i + ncc;
	return sii->numcores;
}

/* scan the sb enumerated space to identify all cores */
void
sb_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii;
	uint32 origsba;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;

	/* Save the current core info and validate it later, until we know
	 * for sure what is good and what is bad.
	 */
	origsba = _sb_coresba(sii);

	/* scan all SB(s) starting from SI_ENUM_BASE */
	sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
}

/*
 * This function changes logical "focus" to the indicated core;
 * must be called with interrupts off.
 * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
 */
void *
sb_setcoreidx(si_t *sih, uint coreidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	if (coreidx >= sii->numcores)
		return (NULL);

	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));

	sii->curmap = _sb_setcoreidx(sii, coreidx);
	sii->curidx = coreidx;

	return (sii->curmap);
}

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static void *
_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
	uint32 sbaddr = sii->coresba[coreidx];
	void *regs;

	switch (BUSTYPE(sii->pub.bustype)) {
	case SI_BUS:
		/* map new one */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		regs = sii->regs[coreidx];
		break;

	case PCI_BUS:
		/* point bar0 window */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
		regs = sii->curmap;
		break;

	case PCMCIA_BUS: {
		uint8 tmp = (sbaddr >> 12) & 0x0f;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
		tmp = (sbaddr >> 16) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
		tmp = (sbaddr >> 24) & 0xff;
		OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
		regs = sii->curmap;
		break;
	}
	case SPI_BUS:
	case SDIO_BUS:
		/* map new one */
		if (!sii->regs[coreidx]) {
			sii->regs[coreidx] = (void *)(uintptr)sbaddr;
			ASSERT(GOODREGS(sii->regs[coreidx]));
		}
		regs = sii->regs[coreidx];
		break;


	default:
		ASSERT(0);
		regs = NULL;
		break;
	}

	return regs;
}

/* Return the address of the sbadmatch0/1/2/3 register */
static volatile uint32 *
sb_admatch(si_info_t *sii, uint asidx)
{
	sbconfig_t *sb;
	volatile uint32 *addrm;

	sb = REGS2SB(sii->curmap);

	switch (asidx) {
	case 0:
		addrm = &sb->sbadmatch0;
		break;

	case 1:
		addrm = &sb->sbadmatch1;
		break;

	case 2:
		addrm = &sb->sbadmatch2;
		break;

	case 3:
		addrm = &sb->sbadmatch3;
		break;

	default:
		SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
		return 0;
	}

	return (addrm);
}

/* Return the number of address spaces in the current core */
int
sb_numaddrspaces(si_t *sih)
{
	si_info_t *sii;
	sbconfig_t *sb;

	sii = SI_INFO(sih);
	sb = REGS2SB(sii->curmap);

	/* + 1 because of enumeration space */
	return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
}

/* Return the address of the nth address space in the current core */
uint32
sb_addrspace(si_t *sih, uint asidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
}

/* Return the size of the nth address space in the current core */
uint32
sb_addrspacesize(si_t *sih, uint asidx)
{
	si_info_t *sii;

	sii = SI_INFO(sih);

	return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
}


/* do the buffered register update */
void
sb_commit(si_t *sih)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;

	sii = SI_INFO(sih);

	origidx = sii->curidx;
	ASSERT(GOODIDX(origidx));

	INTR_OFF(sii, intr_val);

	/* switch over to chipcommon core if there is one, else use pci */
	if (sii->pub.ccrev != NOREV) {
		chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
		ASSERT(ccregs != NULL);

		/* do the buffered register update */
		W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
		W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
	} else
		ASSERT(0);

	/* restore core index */
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
}

void
sb_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii;
	volatile uint32 dummy;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/* if core is already in reset, just return */
	if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
		return;

	/* if clocks are not enabled, put into reset and return */
	if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
		goto disable;

	/* set target reject and spin until busy is clear (preserve core-specific bits) */
	OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
	SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
		SI_ERROR(("%s: target state still busy\n", __FUNCTION__));

	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
		OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
		dummy = R_SBREG(sii, &sb->sbimstate);
		BCM_REFERENCE(dummy);
		OSL_DELAY(1);
		SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
	}

	/* set reset and reject while enabling the clocks */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_REJ | SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);

	/* don't forget to clear the initiator reject bit */
	if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
		AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

disable:
	/* leave reset and reject asserted */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
	OSL_DELAY(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void
sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii;
	sbconfig_t *sb;
	volatile uint32 dummy;

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curmap));
	sb = REGS2SB(sii->curmap);

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	sb_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */

	/* set reset while enabling the clock and forcing them on throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
	         SBTML_RESET));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
		W_SBREG(sii, &sb->sbtmstatehigh, 0);
	}
	if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
		AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
	}

	/* clear reset and allow it to propagate throughout the core */
	W_SBREG(sii, &sb->sbtmstatelow,
	        ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	/* leave clock enabled */
	W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
	dummy = R_SBREG(sii, &sb->sbtmstatelow);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}

/*
 * Set the initiator timeout for the "master core".
 * The master core is defined to be the core in control
 * of the chip and so it issues accesses to non-memory
 * locations (because of DMA, *any* core can access memory).
 *
 * The routine uses the bus to decide who is the master:
 *	SI_BUS => mips
 *	JTAG_BUS => chipc
 *	PCI_BUS => pci or pcie
 *	PCMCIA_BUS => pcmcia
 *	SDIO_BUS => pcmcia
 *
 * This routine exists so callers can disable initiator
 * timeouts so accesses to very slow devices like otp
 * won't cause an abort. The routine allows arbitrary
 * settings of the service and request timeouts, though.
 *
 * Returns the timeout state before changing it or -1
 * on error.
 */

#define	TO_MASK	(SBIMCL_RTO_MASK | SBIMCL_STO_MASK)
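/* TO_MASK covers both the request (RTO) and service (STO) timeout fields of
 * sbimconfiglow, which are the only bits this routine is allowed to change.
 */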

uint32
sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;
	uint32 tmp, ret = 0xffffffff;
	sbconfig_t *sb;

	sii = SI_INFO(sih);

	if ((to & ~TO_MASK) != 0)
		return ret;

	/* Figure out the master core */
	if (idx == BADIDX) {
		switch (BUSTYPE(sii->pub.bustype)) {
		case PCI_BUS:
			idx = sii->pub.buscoreidx;
			break;
		case JTAG_BUS:
			idx = SI_CC_IDX;
			break;
		case PCMCIA_BUS:
		case SDIO_BUS:
			idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
			break;
		case SI_BUS:
			idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
			break;
		default:
			ASSERT(0);
		}
		if (idx == BADIDX)
			return ret;
	}

	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	sb = REGS2SB(sb_setcoreidx(sih, idx));

	tmp = R_SBREG(sii, &sb->sbimconfiglow);
	ret = tmp & TO_MASK;
	W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

	sb_commit(sih);
	sb_setcoreidx(sih, origidx);
	INTR_RESTORE(sii, intr_val);
	return ret;
}

uint32
sb_base(uint32 admatch)
{
	uint32 base;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	base = 0;

	if (type == 0) {
		base = admatch & SBAM_BASE0_MASK;
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE1_MASK;
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		base = admatch & SBAM_BASE2_MASK;
	}

	return (base);
}
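
/*
 * Worked example (illustrative): for a type 0 admatch value, sb_size() below
 * decodes the size field as a power of two, so an ADINT0 field of 11 means
 *	size = 1 << (11 + 1) = 4096 bytes (one 4KB core window),
 * while sb_base() above returns the window base from the high-order bits.
 */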

uint32
sb_size(uint32 admatch)
{
	uint32 size;
	uint type;

	type = admatch & SBAM_TYPE_MASK;
	ASSERT(type < 3);

	size = 0;

	if (type == 0) {
		size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
	} else if (type == 1) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
	} else if (type == 2) {
		ASSERT(!(admatch & SBAM_ADNEG));	/* neg not supported */
		size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
	}

	return (size);
}