/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
 */
#ifndef _ASM_IA64_SN_PDA_H
#define _ASM_IA64_SN_PDA_H

#include <linux/cache.h>
#include <asm/percpu.h>
#include <asm/system.h>


/*
 * CPU-specific data structure.
 *
 * One of these structures is allocated for each cpu of a NUMA system.
 *
 * This structure provides a convenient way of keeping together
 * all SN per-cpu data structures.
 */

typedef struct pda_s {

	/* Having a pointer at the beginning of the PDA tends to increase
	 * the chance of having this pointer in cache. (Yes, something
	 * else gets pushed out.) Doing this reduces the number of extra
	 * memory accesses needed to reach any nodepda variable to one.
	 */
	struct nodepda_s	*p_nodepda;	/* Pointer to per-node PDA */
	struct subnodepda_s	*p_subnodepda;	/* Pointer to CPU subnode PDA */

	/*
	 * Support for SN LEDs
	 */
	volatile short		*led_address;
	u8			led_state;
	u8			hb_state;	/* supports blinking heartbeat leds */
	unsigned int		hb_count;

	unsigned int		idle_flag;

	volatile unsigned long	*bedrock_rev_id;
	volatile unsigned long	*pio_write_status_addr;
	unsigned long		pio_write_status_val;
	volatile unsigned long	*pio_shub_war_cam_addr;

	unsigned long		sn_soft_irr[4];
	unsigned long		sn_in_service_ivecs[4];
	int			sn_lb_int_war_ticks;
	int			sn_last_irq;
	int			sn_first_irq;
} pda_t;
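/*
 * Illustrative sketch (not from the original header): because p_nodepda is
 * the first field of the PDA, any per-node datum is reachable with a single
 * extra load through a pointer that is almost always cache-resident, e.g.
 *
 *	struct nodepda_s *np = pda->p_nodepda;
 *	... np->some_node_field ...	(some_node_field is hypothetical;
 *					 the real layout is in struct nodepda_s)
 */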


#define CACHE_ALIGN(x)	(((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

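/*
 * Worked example (not from the original header), assuming the common SN2
 * value SMP_CACHE_BYTES == 128; the macro rounds a byte count up to the
 * next cache-line boundary:
 *
 *	CACHE_ALIGN(1)   == 128
 *	CACHE_ALIGN(128) == 128
 *	CACHE_ALIGN(129) == 256
 */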
/*
 * PDA
 * Per-cpu private data area for each cpu. The PDA is located immediately after
 * the IA64 cpu_data area. A full page is allocated for the cpu_data area for
 * each cpu, but only a small amount of the page is actually used. We put the
 * SNIA PDA in the same page as the cpu_data area. Note that there is a check
 * in the setup code to verify that we don't overflow the page.
 *
 * It seems like we should cache-line align the pda so that any changes in the
 * size of the cpu_data area don't change cache layout. Should we align to a
 * 32, 64, 128 or 512 byte boundary? Each has merits. For now, pick 128, but
 * this should be revisited later.
 */
DECLARE_PER_CPU(struct pda_s, pda_percpu);

#define pda		(&__ia64_per_cpu_var(pda_percpu))

#define pdacpu(cpu)	(&per_cpu(pda_percpu, cpu))
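/*
 * Usage sketch (illustrative, not part of the original interface): the pda
 * macro resolves to the calling cpu's PDA via __ia64_per_cpu_var(), while
 * pdacpu(cpu) reaches any cpu's PDA via per_cpu(), e.g.
 *
 *	pda->led_state = 0;			current cpu
 *	pdacpu(cpu)->sn_last_irq = irq;		a specific cpu
 */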

#endif /* _ASM_IA64_SN_PDA_H */