/*
 * SN Platform GRU Driver
 *
 *              FILE OPERATIONS & DRIVER INITIALIZATION
 *
 * This file supports the user system call for file open, close, mmap, etc.
 * This also includes the driver initialization code.
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"

#if defined CONFIG_X86_64
#include <asm/genapic.h>
#include <asm/irq.h>
#define IS_UV()		is_uv_system()
#elif defined CONFIG_IA64
#include <asm/system.h>
#include <asm/sn/simulator.h>
/* temp support for running on hardware simulator */
#define IS_UV()		(IS_MEDUSA() || ia64_platform_is("uv"))
#else
#define IS_UV()		0
#endif
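
/*
 * IS_UV() gates all of the setup below: on non-UV systems gru_init() and
 * gru_exit() return immediately without touching any hardware.
 */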

#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
unsigned long gru_start_paddr, gru_end_paddr __read_mostly;
struct gru_stats_s gru_stats;

/* Guaranteed user available resources on each node */
static int max_user_cbrs, max_user_dsr_bytes;

static struct file_operations gru_fops;
static struct miscdevice gru_miscdev;


/*
 * gru_vma_close
 *
 * Called when unmapping a device mapping. Frees all gru resources
 * and tables belonging to the vma.
 */
static void gru_vma_close(struct vm_area_struct *vma)
{
	struct gru_vma_data *vdata;
	struct gru_thread_state *gts;
	struct list_head *entry, *next;

	if (!vma->vm_private_data)
		return;

	vdata = vma->vm_private_data;
	vma->vm_private_data = NULL;
	gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file,
				vdata);
	list_for_each_safe(entry, next, &vdata->vd_head) {
		gts = list_entry(entry, struct gru_thread_state, ts_next);
		list_del(&gts->ts_next);
		mutex_lock(&gts->ts_ctxlock);
		if (gts->ts_gru)
			gru_unload_context(gts, 0);
		mutex_unlock(&gts->ts_ctxlock);
		gts_drop(gts);
	}
	kfree(vdata);
	STAT(vdata_free);
}

/*
 * gru_file_mmap
 *
 * Called when mmaping the device.  Initializes the vma with a fault handler
 * and private data structure necessary to allocate, track, and free the
 * underlying pages.
 */
static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
		return -EPERM;

	if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
				vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
		return -EINVAL;

	vma->vm_flags |=
	    (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP |
			VM_RESERVED);
	vma->vm_page_prot = PAGE_SHARED;
	vma->vm_ops = &gru_vm_ops;

	vma->vm_private_data = gru_alloc_vma_data(vma, 0);
	if (!vma->vm_private_data)
		return -ENOMEM;

	gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n",
		file, vma->vm_start, vma, vma->vm_private_data);
	return 0;
}

/*
 * Create a new GRU context
 */
static int gru_create_new_context(unsigned long arg)
{
	struct gru_create_context_req req;
	struct vm_area_struct *vma;
	struct gru_vma_data *vdata;
	int ret = -EINVAL;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	if (req.data_segment_bytes == 0 ||
				req.data_segment_bytes > max_user_dsr_bytes)
		return -EINVAL;
	if (!req.control_blocks || !req.maximum_thread_count ||
				req.control_blocks > max_user_cbrs)
		return -EINVAL;

	if (!(req.options & GRU_OPT_MISS_MASK))
		req.options |= GRU_OPT_MISS_FMM_INTR;
	down_write(&current->mm->mmap_sem);
	vma = gru_find_vma(req.gseg);
	if (vma) {
		vdata = vma->vm_private_data;
		vdata->vd_user_options = req.options;
		vdata->vd_dsr_au_count =
		    GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
		vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
		ret = 0;
	}
	up_write(&current->mm->mmap_sem);

	return ret;
}
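
/*
 * Illustrative user-space sequence (a sketch only, not part of the driver;
 * the ioctl name and request fields are the ones referenced in this file):
 *
 *	fd = open("/dev/gru", O_RDWR);
 *	gseg = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	req.gseg = (unsigned long)gseg;
 *	req.data_segment_bytes = dsrbytes;	(bounded by max_user_dsr_bytes)
 *	req.control_blocks = ncbrs;		(bounded by max_user_cbrs)
 *	req.maximum_thread_count = nthreads;
 *	ioctl(fd, GRU_CREATE_CONTEXT, &req);
 *
 * gru_create_new_context() looks up the vma for req.gseg via gru_find_vma(),
 * so the gseg must already be mmap'ed when the ioctl is issued.
 */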

/*
 * Get GRU configuration info (temp - for emulator testing)
 */
static long gru_get_config_info(unsigned long arg)
{
	struct gru_config_info info;
	int nodesperblade;

	if (num_online_nodes() > 1 &&
			(uv_node_to_blade_id(1) == uv_node_to_blade_id(0)))
		nodesperblade = 2;
	else
		nodesperblade = 1;
	info.cpus = num_online_cpus();
	info.nodes = num_online_nodes();
	info.blades = info.nodes / nodesperblade;
	info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades;

	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * Get GRU chiplet status
 */
static long gru_get_chiplet_status(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_chiplet_info info;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	if (info.node == -1)
		info.node = numa_node_id();
	if (info.node >= num_possible_nodes() ||
			info.chiplet >= GRU_CHIPLETS_PER_HUB ||
			info.node < 0 || info.chiplet < 0)
		return -EINVAL;

	info.blade = uv_node_to_blade_id(info.node);
	gru = get_gru(info.blade, info.chiplet);

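	/*
	 * Totals are fixed per chiplet; the "user" values subtract the
	 * kernel-reserved resources, and the "free" values count the
	 * allocation units still marked in the chiplet's DSR/CBR bitmaps.
	 */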
	info.total_dsr_bytes = GRU_NUM_DSR_BYTES;
	info.total_cbr = GRU_NUM_CB;
	info.total_user_dsr_bytes = GRU_NUM_DSR_BYTES -
		gru->gs_reserved_dsr_bytes;
	info.total_user_cbr = GRU_NUM_CB - gru->gs_reserved_cbrs;
	info.free_user_dsr_bytes = hweight64(gru->gs_dsr_map) *
			GRU_DSR_AU_BYTES;
	info.free_user_cbr = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;

	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * gru_file_unlocked_ioctl
 *
 * Called to update file attributes via IOCTL calls.
 */
static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
				    unsigned long arg)
{
	int err = -EBADRQC;

	gru_dbg(grudev, "file %p\n", file);

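	/* Unrecognized requests fall through and return the initial -EBADRQC. */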
	switch (req) {
	case GRU_CREATE_CONTEXT:
		err = gru_create_new_context(arg);
		break;
	case GRU_SET_TASK_SLICE:
		err = gru_set_task_slice(arg);
		break;
	case GRU_USER_GET_EXCEPTION_DETAIL:
		err = gru_get_exception_detail(arg);
		break;
	case GRU_USER_UNLOAD_CONTEXT:
		err = gru_user_unload_context(arg);
		break;
	case GRU_GET_CHIPLET_STATUS:
		err = gru_get_chiplet_status(arg);
		break;
	case GRU_USER_FLUSH_TLB:
		err = gru_user_flush_tlb(arg);
		break;
	case GRU_USER_CALL_OS:
		err = gru_handle_user_call_os(arg);
		break;
	case GRU_GET_CONFIG_INFO:
		err = gru_get_config_info(arg);
		break;
	}
	return err;
}

/*
 * Called at init time to build tables for all GRUs that are present in the
 * system.
 */
static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
			     void *vaddr, int nid, int bid, int grunum)
{
	spin_lock_init(&gru->gs_lock);
	spin_lock_init(&gru->gs_asid_lock);
	gru->gs_gru_base_paddr = paddr;
	gru->gs_gru_base_vaddr = vaddr;
	gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum;
	gru->gs_blade = gru_base[bid];
	gru->gs_blade_id = bid;
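	/*
	 * Mark every CBR/DSR allocation unit free.  The CBR map special-cases
	 * 64 AUs because a 64-bit shift by 64 is undefined behavior in C.
	 */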
	gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
	gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
	gru_tgh_flush_init(gru);
	gru_dbg(grudev, "bid %d, nid %d, gru %x, vaddr %p (0x%lx)\n",
		bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
		gru->gs_gru_base_paddr);
	gru_kservices_init(gru);
}

static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
{
	int pnode, nid, bid, chip;
	int cbrs, dsrbytes, n;
	int order = get_order(sizeof(struct gru_blade_state));
	struct page *page;
	struct gru_state *gru;
	unsigned long paddr;
	void *vaddr;

	max_user_cbrs = GRU_NUM_CB;
	max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
	for_each_online_node(nid) {
		bid = uv_node_to_blade_id(nid);
		pnode = uv_node_to_pnode(nid);
		if (gru_base[bid])
			continue;
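		/*
		 * Allocate one gru_blade_state per blade, placed on a node of
		 * that blade so the tables stay blade-local.
		 */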
		page = alloc_pages_node(nid, GFP_KERNEL, order);
		if (!page)
			goto fail;
		gru_base[bid] = page_address(page);
		memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
		gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
		spin_lock_init(&gru_base[bid]->bs_lock);

		dsrbytes = 0;
		cbrs = 0;
		for (gru = gru_base[bid]->bs_grus, chip = 0;
				chip < GRU_CHIPLETS_PER_BLADE;
				chip++, gru++) {
			paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
			vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
			gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
			n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
			cbrs = max(cbrs, n);
			n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
			dsrbytes = max(dsrbytes, n);
		}
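		/*
		 * Advertise only what every blade can provide: the user limits
		 * are the minimum CBR/DSR capacity seen across all blades.
		 */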
		max_user_cbrs = min(max_user_cbrs, cbrs);
		max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes);
	}

	return 0;

fail:
	for (nid--; nid >= 0; nid--)
		free_pages((unsigned long)gru_base[nid], order);
	return -ENOMEM;
}

#ifdef CONFIG_IA64

static int get_base_irq(void)
{
	return IRQ_GRU;
}

#elif defined CONFIG_X86_64

static void noop(unsigned int irq)
{
}

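/*
 * Placeholder irq_chip: all callbacks are no-ops so the GRU vectors can be
 * registered with request_irq() on x86 even though interrupt handling there
 * is not yet complete (see the TODO in gru_init()).
 */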
static struct irq_chip gru_chip = {
	.name		= "gru",
	.mask		= noop,
	.unmask		= noop,
	.ack		= noop,
};

static int get_base_irq(void)
{
	set_irq_chip(IRQ_GRU, &gru_chip);
	set_irq_chip(IRQ_GRU + 1, &gru_chip);
	return IRQ_GRU;
}
#endif

/*
 * gru_init
 *
 * Called at boot or module load time to initialize the GRUs.
 */
static int __init gru_init(void)
{
	int ret, irq, chip;
	char id[10];
	void *gru_start_vaddr;

	if (!IS_UV())
		return 0;

#if defined CONFIG_IA64
	gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
#else
	gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
				0x7fffffffffffUL;
#endif
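	/* One GRU_SIZE region per possible node, assumed physically contiguous. */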
	gru_start_vaddr = __va(gru_start_paddr);
	gru_end_paddr = gru_start_paddr + MAX_NUMNODES * GRU_SIZE;
	printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
	       gru_start_paddr, gru_end_paddr);
	irq = get_base_irq();
	for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
		ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
		/* TODO: fix irq handling on x86. For now ignore failures
		 * because interrupts are not required & not yet fully
		 * supported */
		if (ret) {
			printk("!!!WARNING: GRU ignoring request failure!!!\n");
			ret = 0;
		}
		if (ret) {
			printk(KERN_ERR "%s: request_irq failed\n",
			       GRU_DRIVER_ID_STR);
			goto exit1;
		}
	}

	ret = misc_register(&gru_miscdev);
	if (ret) {
		printk(KERN_ERR "%s: misc_register failed\n",
		       GRU_DRIVER_ID_STR);
		goto exit1;
	}

	ret = gru_proc_init();
	if (ret) {
		printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
		goto exit2;
	}

	ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
	if (ret) {
		printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
		goto exit3;
	}

	printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
	       GRU_DRIVER_VERSION_STR);
	return 0;

exit3:
	gru_proc_exit();
exit2:
	misc_deregister(&gru_miscdev);
exit1:
	for (--chip; chip >= 0; chip--)
		free_irq(irq + chip, NULL);
	return ret;
}

static void __exit gru_exit(void)
{
	int i, bid;
	int order = get_order(sizeof(struct gru_blade_state));

	if (!IS_UV())
		return;

	for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
		free_irq(IRQ_GRU + i, NULL);

	for (bid = 0; bid < GRU_MAX_BLADES; bid++)
		free_pages((unsigned long)gru_base[bid], order);

	misc_deregister(&gru_miscdev);
	gru_proc_exit();
}

static struct file_operations gru_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= gru_file_unlocked_ioctl,
	.mmap		= gru_file_mmap,
};

static struct miscdevice gru_miscdev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "gru",
	.fops		= &gru_fops,
};

struct vm_operations_struct gru_vm_ops = {
	.close		= gru_vma_close,
	.fault		= gru_fault,
};

fs_initcall(gru_init);
module_exit(gru_exit);

module_param(gru_options, ulong, 0644);
MODULE_PARM_DESC(gru_options, "Various debug options");

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR);
MODULE_VERSION(GRU_DRIVER_VERSION_STR);