/*
 * SN Platform GRU Driver
 *
 *            GRU DRIVER TABLES, MACROS, externs, etc
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#ifndef __GRUTABLES_H__
#define __GRUTABLES_H__

/*
 * GRU Chiplet:
 *   The GRU is a user addressable memory accelerator. It provides
 *   several forms of load, store, memset, and bcopy instructions. In addition,
 *   it contains special instructions for AMOs, sending messages to message
 *   queues, etc.
 *
 *   The GRU is an integral part of the node controller. It connects
 *   directly to the cpu socket. In its current implementation, there are 2
 *   GRU chiplets in the node controller on each blade (~node).
 *
 *   The entire GRU memory space is fully coherent and cacheable by the cpus.
 *
 *   Each GRU chiplet has a physical memory map that looks like the following:
 *
 *   	+-----------------+
 *   	|/////////////////|
 *   	|/////////////////|
 *   	|/////////////////|
 *   	|/////////////////|
 *   	|/////////////////|
 *   	|/////////////////|
 *   	|/////////////////|
 *   	|/////////////////|
 *   	+-----------------+
 *   	|  system control |
 *   	+-----------------+        _______ +-------------+
 *   	|/////////////////|       /        |             |
 *   	|/////////////////|      /         |             |
 *   	|/////////////////|     /          | instructions|
 *   	|/////////////////|    /           |             |
 *   	|/////////////////|   /            |             |
 *   	|/////////////////|  /             |-------------|
 *   	|/////////////////| /              |             |
 *   	+-----------------+                |             |
 *   	|   context 15    |                |  data       |
 *   	+-----------------+                |             |
 *   	|    ......       | \              |             |
 *   	+-----------------+  \____________ +-------------+
 *   	|   context 1     |
 *   	+-----------------+
 *   	|   context 0     |
 *   	+-----------------+
 *
 *   Each of the "contexts" is a chunk of memory that can be mmaped into user
 *   space. The context consists of 2 parts:
 *
 *  	- an instruction space that can be directly accessed by the user
 *  	  to issue GRU instructions and to check instruction status.
 *
 *  	- a data area that acts as normal RAM.
 *
 *   User instructions contain virtual addresses of data to be accessed by the
 *   GRU. The GRU contains a TLB that is used to convert these user virtual
 *   addresses to physical addresses.
 *
 *   The "system control" area of the GRU chiplet is used by the kernel driver
 *   to manage user contexts and to perform functions such as TLB dropin and
 *   purging.
 *
 *   One context may be reserved for the kernel and used for cross-partition
 *   communication. The GRU will also be used to asynchronously zero out
 *   large blocks of memory (not currently implemented).
 *
 *
 *
 * Tables:
 *
 * 	VDATA-VMA Data		- Holds a few parameters. Head of linked list of
 * 				  GTS tables for threads using the GSEG
 * 	GTS - Gru Thread State  - contains info for managing a GSEG context. A
 * 				  GTS is allocated for each thread accessing a
 * 				  GSEG.
 *     	GTD - GRU Thread Data   - contains shadow copy of GRU data when GSEG is
 *     				  not loaded into a GRU
 *	GMS - GRU Memory Struct - Used to manage TLB shootdowns. Tracks GRUs
 *				  where a GSEG has been loaded. Similar to
 *				  an mm_struct but for GRU.
 *
 *	GS  - GRU State 	- Used to manage the state of a GRU chiplet
 *	BS  - Blade State	- Used to manage state of all GRU chiplets
 *				  on a blade
 *
 *
 *  Normal task tables for a task using the GRU:
 *  		- 2 threads in process
 *  		- 2 GSEGs open in process
 *  		- GSEG1 is being used by both threads
 *  		- GSEG2 is used only by thread 2
 *
 *       task -->|
 *       task ---+---> mm ->------ (notifier) -------+-> gms
 *                     |                             |
 *                     |--> vma -> vdata ---> gts--->|		GSEG1 (thread1)
 *                     |                  |          |
 *                     |                  +-> gts--->|		GSEG1 (thread2)
 *                     |                             |
 *                     |--> vma -> vdata ---> gts--->|		GSEG2 (thread2)
 *                     .
 *                     .
 *
 *  GSEGs are marked DONTCOPY on fork
 *
 * At open
 * 	file.private_data -> NULL
 *
 * At mmap,
 * 	vma -> vdata
 *
 * After gseg reference
 * 	vma -> vdata -> gts
 *
 * After fork
 *   parent
 * 	vma -> vdata -> gts
 *   child
 * 	(vma is not copied)
 *
 */
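
/*
 * Illustrative user-space sketch (not part of the driver): following the
 * open/mmap/reference sequence above, a thread obtains a GSEG roughly like
 * this. The device path shown is an assumption; the real user interface is
 * provided by the grulib library.
 *
 *	int fd = open("/dev/gru", O_RDWR);
 *	void *gseg = mmap(NULL, GRU_GSEG_PAGESIZE, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * At mmap time vma->vm_private_data is set to a vdata; the first reference
 * to the GSEG then allocates a gts for the calling thread.
 */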

#include <linux/rmap.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/mmu_notifier.h>
#include "gru.h"
#include "grulib.h"
#include "gruhandles.h"

extern struct gru_stats_s gru_stats;
extern struct gru_blade_state *gru_base[];
extern unsigned long gru_start_paddr, gru_end_paddr;
extern void *gru_start_vaddr;
extern unsigned int gru_max_gids;

#define GRU_MAX_BLADES		MAX_NUMNODES
#define GRU_MAX_GRUS		(GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)

#define GRU_DRIVER_ID_STR	"SGI GRU Device Driver"
#define GRU_DRIVER_VERSION_STR	"0.80"

/*
 * GRU statistics.
 */
struct gru_stats_s {
	atomic_long_t vdata_alloc;
	atomic_long_t vdata_free;
	atomic_long_t gts_alloc;
	atomic_long_t gts_free;
	atomic_long_t vdata_double_alloc;
	atomic_long_t gts_double_allocate;
	atomic_long_t assign_context;
	atomic_long_t assign_context_failed;
	atomic_long_t free_context;
	atomic_long_t load_user_context;
	atomic_long_t load_kernel_context;
	atomic_long_t lock_kernel_context;
	atomic_long_t unlock_kernel_context;
	atomic_long_t steal_user_context;
	atomic_long_t steal_kernel_context;
	atomic_long_t steal_context_failed;
	atomic_long_t nopfn;
	atomic_long_t break_cow;
	atomic_long_t asid_new;
	atomic_long_t asid_next;
	atomic_long_t asid_wrap;
	atomic_long_t asid_reuse;
	atomic_long_t intr;
	atomic_long_t intr_mm_lock_failed;
	atomic_long_t call_os;
	atomic_long_t call_os_offnode_reference;
	atomic_long_t call_os_check_for_bug;
	atomic_long_t call_os_wait_queue;
	atomic_long_t user_flush_tlb;
	atomic_long_t user_unload_context;
	atomic_long_t user_exception;
	atomic_long_t set_context_option;
	atomic_long_t migrate_check;
	atomic_long_t migrated_retarget;
	atomic_long_t migrated_unload;
	atomic_long_t migrated_unload_delay;
	atomic_long_t migrated_nopfn_retarget;
	atomic_long_t migrated_nopfn_unload;
	atomic_long_t tlb_dropin;
	atomic_long_t tlb_dropin_fail_no_asid;
	atomic_long_t tlb_dropin_fail_upm;
	atomic_long_t tlb_dropin_fail_invalid;
	atomic_long_t tlb_dropin_fail_range_active;
	atomic_long_t tlb_dropin_fail_idle;
	atomic_long_t tlb_dropin_fail_fmm;
	atomic_long_t tlb_dropin_fail_no_exception;
	atomic_long_t tlb_dropin_fail_no_exception_war;
	atomic_long_t tfh_stale_on_fault;
	atomic_long_t mmu_invalidate_range;
	atomic_long_t mmu_invalidate_page;
	atomic_long_t mmu_clear_flush_young;
	atomic_long_t flush_tlb;
	atomic_long_t flush_tlb_gru;
	atomic_long_t flush_tlb_gru_tgh;
	atomic_long_t flush_tlb_gru_zero_asid;

	atomic_long_t copy_gpa;

	atomic_long_t mesq_receive;
	atomic_long_t mesq_receive_none;
	atomic_long_t mesq_send;
	atomic_long_t mesq_send_failed;
	atomic_long_t mesq_noop;
	atomic_long_t mesq_send_unexpected_error;
	atomic_long_t mesq_send_lb_overflow;
	atomic_long_t mesq_send_qlimit_reached;
	atomic_long_t mesq_send_amo_nacked;
	atomic_long_t mesq_send_put_nacked;
	atomic_long_t mesq_qf_not_full;
	atomic_long_t mesq_qf_locked;
	atomic_long_t mesq_qf_noop_not_full;
	atomic_long_t mesq_qf_switch_head_failed;
	atomic_long_t mesq_qf_unexpected_error;
	atomic_long_t mesq_noop_unexpected_error;
	atomic_long_t mesq_noop_lb_overflow;
	atomic_long_t mesq_noop_qlimit_reached;
	atomic_long_t mesq_noop_amo_nacked;
	atomic_long_t mesq_noop_put_nacked;

};

enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
	cchop_deallocate, tghop_invalidate, mcsop_last};

struct mcs_op_statistic {
	atomic_long_t	count;
	atomic_long_t	total;
	unsigned long	max;
};

extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];

#define OPT_DPRINT	1
#define OPT_STATS	2


#define IRQ_GRU			110	/* Starting IRQ number for interrupts */

/* Delay in jiffies between attempts to assign a GRU context */
#define GRU_ASSIGN_DELAY	((HZ * 20) / 1000)

/*
 * If a process has its context stolen, min delay in jiffies before trying to
 * steal a context from another process.
 */
#define GRU_STEAL_DELAY		((HZ * 200) / 1000)

#define STAT(id)	do {						\
				if (gru_options & OPT_STATS)		\
					atomic_long_inc(&gru_stats.id);	\
			} while (0)
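
/*
 * Example (illustrative only): counting a vdata allocation. The counter is
 * accumulated only when OPT_STATS is set in gru_options.
 *
 *	STAT(vdata_alloc);
 */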

#ifdef CONFIG_SGI_GRU_DEBUG
#define gru_dbg(dev, fmt, x...)						\
	do {								\
		if (gru_options & OPT_DPRINT)				\
			dev_dbg(dev, "%s: " fmt, __func__, x);		\
	} while (0)
#else
#define gru_dbg(x...)
#endif
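
/*
 * Example (illustrative only): gru_dbg() output appears only when the driver
 * is built with CONFIG_SGI_GRU_DEBUG and OPT_DPRINT is set in gru_options.
 *
 *	gru_dbg(grudev, "gid %d, ctxnum %d\n", gru->gs_gid, gts->ts_ctxnum);
 */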

/*-----------------------------------------------------------------------------
 * ASID management
 */
#define MAX_ASID	0xfffff0
#define MIN_ASID	8
#define ASID_INC	8	/* number of regions */

/* Generate a GRU asid value from a GRU base asid & a virtual address. */
#if defined CONFIG_IA64
#define VADDR_HI_BIT		64
#elif defined CONFIG_X86_64
#define VADDR_HI_BIT		48
#else
#error "Unsupported architecture"
#endif
#define GRUREGION(addr)		((addr) >> (VADDR_HI_BIT - 3) & 3)
#define GRUASID(asid, addr)	((asid) + GRUREGION(addr))
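
/*
 * Worked example (illustrative): on x86_64, VADDR_HI_BIT is 48, so
 * GRUREGION() selects bits 46..45 of the user virtual address. With a base
 * asid of 40:
 *
 *	GRUASID(40, 0x0000002000000000UL) == 40		(region 0)
 *	GRUASID(40, 0x0000400000000000UL) == 42		(region 2)
 */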

/*------------------------------------------------------------------------------
 *  File & VMS Tables
 */

struct gru_state;

/*
 * This structure is pointed to from the mm_struct via the notifier pointer.
 * There is one of these per address space.
 */
struct gru_mm_tracker {				/* pack to reduce size */
	unsigned int		mt_asid_gen:24;	/* ASID wrap count */
	unsigned int		mt_asid:24;	/* current base ASID for gru */
	unsigned short		mt_ctxbitmap:16;/* bitmap of contexts using
						   asid */
} __attribute__ ((packed));

struct gru_mm_struct {
	struct mmu_notifier	ms_notifier;
	atomic_t		ms_refcnt;
	spinlock_t		ms_asid_lock;	/* protects ASID assignment */
	atomic_t		ms_range_active;/* num range_invals active */
	char			ms_released;
	wait_queue_head_t	ms_wait_queue;
	DECLARE_BITMAP(ms_asidmap, GRU_MAX_GRUS);
	struct gru_mm_tracker	ms_asids[GRU_MAX_GRUS];
};
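
/*
 * Conceptual sketch (illustrative; the real logic lives in the TLB flush
 * code): the gms tracks, per GRU, which contexts hold translations for the
 * mm, so an mmu-notifier callback only needs to flush the GRUs actually in
 * use:
 *
 *	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
 *
 *	if (asids->mt_ctxbitmap)
 *		... flush the affected range on this gru ...
 *
 * (see gru_flush_tlb_range() declared below)
 */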

/*
 * One of these structures is allocated when a GSEG is mmaped. The
 * structure is pointed to by the vma->vm_private_data field in the vma struct.
 */
struct gru_vma_data {
	spinlock_t		vd_lock;	/* Serialize access to vma */
	struct list_head	vd_head;	/* head of linked list of gts */
	long			vd_user_options;/* misc user option flags */
	int			vd_cbr_au_count;
	int			vd_dsr_au_count;
};

/*
 * One of these is allocated for each thread accessing a mmaped GRU. A linked
 * list of these structures is hung off the struct gru_vma_data in the vma.
 */
struct gru_thread_state {
	struct list_head	ts_next;	/* list - head at vma-private */
	struct mutex		ts_ctxlock;	/* load/unload CTX lock */
	struct mm_struct	*ts_mm;		/* mm currently mapped to
						   context */
	struct vm_area_struct	*ts_vma;	/* vma of GRU context */
	struct gru_state	*ts_gru;	/* GRU where the context is
						   loaded */
	struct gru_mm_struct	*ts_gms;	/* asid & ioproc struct */
	unsigned long		ts_cbr_map;	/* map of allocated CBRs */
	unsigned long		ts_dsr_map;	/* map of allocated DATA
						   resources */
	unsigned long		ts_steal_jiffies;/* jiffies when context last
						    stolen */
	long			ts_user_options;/* misc user option flags */
	pid_t			ts_tgid_owner;	/* task that is using the
						   context - for migration */
	unsigned short		ts_sizeavail;	/* Pagesizes in use */
	int			ts_tsid;	/* thread that owns the
						   structure */
	int			ts_tlb_int_select;/* target cpu if interrupts
						     enabled */
	int			ts_ctxnum;	/* context number where the
						   context is loaded */
	atomic_t		ts_refcnt;	/* reference count GTS */
	unsigned char		ts_dsr_au_count;/* Number of DSR resources
						   required for context */
	unsigned char		ts_cbr_au_count;/* Number of CBR resources
						   required for context */
	char			ts_cch_req_slice;/* CCH packet slice */
	char			ts_blade;	/* If >= 0, migrate context if
						   ref from different blade */
	char			ts_force_cch_reload;
	char			ts_force_unload;/* force context to be unloaded
						   after migration */
	char			ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
							  allocated CB */
	int			ts_data_valid;	/* Indicates if ts_gdata has
						   valid data */
	struct gts_statistics	ustats;		/* User statistics */
	unsigned long		ts_gdata[0];	/* save area for GRU data (CB,
						   DS, CBE) */
};

/*
 * Threaded programs actually allocate an array of GSEGs when a context is
 * created. Each thread uses a separate GSEG. TSID is the index into the GSEG
 * array.
 */
#define TSID(a, v)		(((a) - (v)->vm_start) / GRU_GSEG_PAGESIZE)
#define UGRUADDR(gts)		((gts)->ts_vma->vm_start +		\
					(gts)->ts_tsid * GRU_GSEG_PAGESIZE)
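
/*
 * Example (illustrative): a thread working in the third GSEG of a context
 * array has tsid 2, so:
 *
 *	TSID(vma->vm_start + 2 * GRU_GSEG_PAGESIZE, vma) == 2
 *	UGRUADDR(gts) == vma->vm_start + 2 * GRU_GSEG_PAGESIZE
 *				(when gts->ts_tsid == 2)
 */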

#define NULLCTX			(-1)	/* if context not loaded into GRU */

/*-----------------------------------------------------------------------------
 *  GRU State Tables
 */

/*
 * One of these exists for each GRU chiplet.
 */
struct gru_state {
	struct gru_blade_state	*gs_blade;		/* GRU state for entire
							   blade */
	unsigned long		gs_gru_base_paddr;	/* Physical address of
							   gru segments (64) */
	void			*gs_gru_base_vaddr;	/* Virtual address of
							   gru segments (64) */
	unsigned short		gs_gid;			/* unique GRU number */
	unsigned short		gs_blade_id;		/* blade of GRU */
	unsigned char		gs_tgh_local_shift;	/* used to pick TGH for
							   local flush */
	unsigned char		gs_tgh_first_remote;	/* starting TGH# for
							   remote flush */
	spinlock_t		gs_asid_lock;		/* lock used for
							   assigning asids */
	spinlock_t		gs_lock;		/* lock used for
							   assigning contexts */

	/* -- the following are protected by the gs_asid_lock spinlock ---- */
	unsigned int		gs_asid;		/* Next available ASID */
	unsigned int		gs_asid_limit;		/* Limit of available
							   ASIDs */
	unsigned int		gs_asid_gen;		/* asid generation.
							   Inc on wrap */

	/* --- the following fields are protected by the gs_lock spinlock --- */
	unsigned long		gs_context_map;		/* bitmap to manage
							   contexts in use */
	unsigned long		gs_cbr_map;		/* bitmap to manage CB
							   resources */
	unsigned long		gs_dsr_map;		/* bitmap used to manage
							   DATA resources */
	unsigned int		gs_reserved_cbrs;	/* Number of kernel-
							   reserved cbrs */
	unsigned int		gs_reserved_dsr_bytes;	/* Bytes of kernel-
							   reserved dsrs */
	unsigned short		gs_active_contexts;	/* number of contexts
							   in use */
	struct gru_thread_state	*gs_gts[GRU_NUM_CCH];	/* GTS currently using
							   the context */
};

/*
 * This structure contains the GRU state for all the GRUs on a blade.
 */
struct gru_blade_state {
	void			*kernel_cb;		/* First kernel
							   reserved cb */
	void			*kernel_dsr;		/* First kernel
							   reserved DSR */
	struct rw_semaphore	bs_kgts_sema;		/* lock for kgts */
	struct gru_thread_state *bs_kgts;		/* GTS for kernel use */

	/* ---- the following are used for managing kernel async GRU CBRs --- */
	int			bs_async_dsr_bytes;	/* DSRs for async */
	int			bs_async_cbrs;		/* CBRs AU for async */
	struct completion	*bs_async_wq;

	/* ---- the following are protected by the bs_lock spinlock ---- */
	spinlock_t		bs_lock;		/* lock used for
							   stealing contexts */
	int			bs_lru_ctxnum;		/* STEAL - last context
							   stolen */
	struct gru_state	*bs_lru_gru;		/* STEAL - last gru
							   stolen */

	struct gru_state	bs_grus[GRU_CHIPLETS_PER_BLADE];
};

/*-----------------------------------------------------------------------------
 * Address Primitives
 */
#define get_tfm_for_cpu(g, c)						\
	((struct gru_tlb_fault_map *)get_tfm((g)->gs_gru_base_vaddr, (c)))
#define get_tfh_by_index(g, i)						\
	((struct gru_tlb_fault_handle *)get_tfh((g)->gs_gru_base_vaddr, (i)))
#define get_tgh_by_index(g, i)						\
	((struct gru_tlb_global_handle *)get_tgh((g)->gs_gru_base_vaddr, (i)))
#define get_cbe_by_index(g, i)						\
	((struct gru_control_block_extended *)get_cbe((g)->gs_gru_base_vaddr,\
			(i)))

/*-----------------------------------------------------------------------------
 * Useful Macros
 */

/* Given a blade# & chiplet#, get a pointer to the GRU */
#define get_gru(b, c)		(&gru_base[b]->bs_grus[c])

/* Number of bytes to save/restore when unloading/loading GRU contexts */
#define DSR_BYTES(dsr)		((dsr) * GRU_DSR_AU_BYTES)
#define CBR_BYTES(cbr)		((cbr) * GRU_HANDLE_BYTES * GRU_CBR_AU_SIZE * 2)

/* Convert a user CB number to the actual CBRNUM */
#define thread_cbr_number(gts, n) ((gts)->ts_cbr_idx[(n) / GRU_CBR_AU_SIZE] \
				  * GRU_CBR_AU_SIZE + (n) % GRU_CBR_AU_SIZE)
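
/*
 * Example (illustrative, assuming GRU_CBR_AU_SIZE == 4): a gts that was
 * granted allocation units 3 and 7 has ts_cbr_idx[] = {3, 7}. User CB
 * number 5 then resolves to:
 *
 *	thread_cbr_number(gts, 5)
 *		== ts_cbr_idx[5 / 4] * 4 + 5 % 4
 *		== 7 * 4 + 1
 *		== 29
 */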

/* Convert a gid to a pointer to the GRU */
#define GID_TO_GRU(gid)							\
	(gru_base[(gid) / GRU_CHIPLETS_PER_BLADE] ?			\
		(&gru_base[(gid) / GRU_CHIPLETS_PER_BLADE]->		\
			bs_grus[(gid) % GRU_CHIPLETS_PER_BLADE]) :	\
	 NULL)

/* Scan all active GRUs in a GRU bitmap */
#define for_each_gru_in_bitmap(gid, map)				\
	for ((gid) = find_first_bit((map), GRU_MAX_GRUS); (gid) < GRU_MAX_GRUS;\
		(gid)++, (gid) = find_next_bit((map), GRU_MAX_GRUS, (gid)))

/* Scan all active GRUs on a specific blade */
#define for_each_gru_on_blade(gru, nid, i)				\
	for ((gru) = gru_base[nid]->bs_grus, (i) = 0;			\
			(i) < GRU_CHIPLETS_PER_BLADE;			\
			(i)++, (gru)++)

/* Scan all GRUs */
#define foreach_gid(gid)						\
	for ((gid) = 0; (gid) < gru_max_gids; (gid)++)
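
/*
 * Usage sketch (illustrative): walk every possible gid, skipping blades that
 * have no GRU state allocated.
 *
 *	int gid;
 *	struct gru_state *gru;
 *
 *	foreach_gid(gid) {
 *		gru = GID_TO_GRU(gid);
 *		if (!gru)
 *			continue;
 *		gru_flush_all_tlb(gru);
 *	}
 */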

/* Scan all active GTSs on a gru. Note: must hold gs_lock to use this macro. */
#define for_each_gts_on_gru(gts, gru, ctxnum)				\
	for ((ctxnum) = 0; (ctxnum) < GRU_NUM_CCH; (ctxnum)++)		\
		if (((gts) = (gru)->gs_gts[ctxnum]))

/* Scan each CBR whose bit is set in a TFM (or copy of) */
#define for_each_cbr_in_tfm(i, map)					\
	for ((i) = find_first_bit(map, GRU_NUM_CBE);			\
			(i) < GRU_NUM_CBE;				\
			(i)++, (i) = find_next_bit(map, GRU_NUM_CBE, i))

/* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */
#define for_each_cbr_in_allocation_map(i, map, k)			\
	for ((k) = find_first_bit(map, GRU_CBR_AU); (k) < GRU_CBR_AU;	\
			(k) = find_next_bit(map, GRU_CBR_AU, (k) + 1))	\
		for ((i) = (k)*GRU_CBR_AU_SIZE;				\
				(i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++)

/* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */
#define for_each_dsr_in_allocation_map(i, map, k)			\
	for ((k) = find_first_bit((const unsigned long *)map, GRU_DSR_AU);\
			(k) < GRU_DSR_AU;				\
			(k) = find_next_bit((const unsigned long *)map,	\
					  GRU_DSR_AU, (k) + 1))		\
		for ((i) = (k) * GRU_DSR_AU_CL;				\
				(i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++)
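
/*
 * Usage sketch (illustrative): visit every DSR cacheline reserved for a gts.
 * "k" iterates over the allocation units set in the map, "i" over the
 * individual cachelines within each unit.
 *
 *	int i, k;
 *
 *	for_each_dsr_in_allocation_map(i, &gts->ts_dsr_map, k) {
 *		... operate on DSR cacheline "i" ...
 *	}
 */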

#define gseg_physical_address(gru, ctxnum)				\
		((gru)->gs_gru_base_paddr + ctxnum * GRU_GSEG_STRIDE)
#define gseg_virtual_address(gru, ctxnum)				\
		((gru)->gs_gru_base_vaddr + ctxnum * GRU_GSEG_STRIDE)

/*-----------------------------------------------------------------------------
 * Lock / Unlock GRU handles
 * 	Use the "delresp" bit in the handle as a "lock" bit.
 */

/* Lock hierarchy checking enabled only in emulator */

/* 0 = lock failed, 1 = locked */
static inline int __trylock_handle(void *h)
{
	return !test_and_set_bit(1, h);
}

static inline void __lock_handle(void *h)
{
	while (test_and_set_bit(1, h))
		cpu_relax();
}

static inline void __unlock_handle(void *h)
{
	clear_bit(1, h);
}

static inline int trylock_cch_handle(struct gru_context_configuration_handle *cch)
{
	return __trylock_handle(cch);
}

static inline void lock_cch_handle(struct gru_context_configuration_handle *cch)
{
	__lock_handle(cch);
}

static inline void unlock_cch_handle(struct gru_context_configuration_handle
				     *cch)
{
	__unlock_handle(cch);
}

static inline void lock_tgh_handle(struct gru_tlb_global_handle *tgh)
{
	__lock_handle(tgh);
}

static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
{
	__unlock_handle(tgh);
}
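
/*
 * Usage sketch (illustrative): the delresp bit serializes kernel users of a
 * handle. A CCH is typically locked while its fields are modified and the
 * operation is started, then unlocked:
 *
 *	lock_cch_handle(cch);
 *	... update cch fields / start the operation ...
 *	unlock_cch_handle(cch);
 *
 * trylock_cch_handle() is the non-blocking variant; it returns 0 if the
 * handle is already locked.
 */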

static inline int is_kernel_context(struct gru_thread_state *gts)
{
	return !gts->ts_mm;
}

/*-----------------------------------------------------------------------------
 * Function prototypes & externs
 */
struct gru_unload_context_req;

extern const struct vm_operations_struct gru_vm_ops;
extern struct device *grudev;

extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma,
				int tsid);
extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
				*vma, int tsid);
extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
				*vma, int tsid);
extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
		int blade);
extern void gru_load_context(struct gru_thread_state *gts);
extern void gru_steal_context(struct gru_thread_state *gts, int blade_id);
extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
extern void gts_drop(struct gru_thread_state *gts);
extern void gru_tgh_flush_init(struct gru_state *gru);
extern int gru_kservices_init(void);
extern void gru_kservices_exit(void);
extern int gru_dump_chiplet_request(unsigned long arg);
extern long gru_get_gseg_statistics(unsigned long arg);
extern irqreturn_t gru_intr(int irq, void *dev_id);
extern int gru_handle_user_call_os(unsigned long address);
extern int gru_user_flush_tlb(unsigned long arg);
extern int gru_user_unload_context(unsigned long arg);
extern int gru_get_exception_detail(unsigned long arg);
extern int gru_set_context_option(unsigned long address);
extern int gru_cpu_fault_map_id(void);
extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
extern void gru_flush_all_tlb(struct gru_state *gru);
extern int gru_proc_init(void);
extern void gru_proc_exit(void);

extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count, int options, int tsid);
extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
		int cbr_au_count, char *cbmap);
extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
		int dsr_au_count, char *dsmap);
extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf);
extern struct gru_mm_struct *gru_register_mmu_notifier(void);
extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms);

extern int gru_ktest(unsigned long arg);
extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
					unsigned long len);

extern unsigned long gru_options;

#endif /* __GRUTABLES_H__ */