/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 *    Copyright (c) 2003,2008 IBM Corp.
 *     Dave Engebretsen engebret@us.ibm.com
 *     Santiago Leon santil@us.ibm.com
 *     Hollis Blanchard <hollisb@us.ibm.com>
 *     Stephen Rothwell
 *     Robert Jennings <rcjenn@us.ibm.com>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/abs_addr.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/iseries/vio.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/iommu.h>

static struct bus_type vio_bus_type;

static struct vio_dev vio_bus_device = { /* fake "parent" device */
        .name = "vio",
        .type = "",
        .dev.init_name = "vio",
        .dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
        size_t size;
        size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072

/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
        struct vio_dev *viodev;
        struct list_head list;
};

/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
struct vio_cmo {
        spinlock_t lock;
        struct delayed_work balance_q;
        struct list_head device_list;
        size_t entitled;
        struct vio_cmo_pool reserve;
        struct vio_cmo_pool excess;
        size_t spare;
        size_t min;
        size_t desired;
        size_t curr;
        size_t high;
} vio_cmo;
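
/*
 * Note (descriptive, not in the original source): the reserve and
 * excess pools partition the system entitlement.  vio_cmo_bus_init()
 * sizes the reserve pool as the spare plus VIO_CMO_MIN_ENT per OF
 * device and gives the remainder to the excess pool, and
 * vio_cmo_balance() recomputes excess.size as entitled - reserve.size,
 * so the split is re-established after every balance operation.
 */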

/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
        struct device_node *node_vroot;
        int count = 0;

        /*
         * Count the number of vdevice entries with an
         * ibm,my-dma-window OF property
         */
        node_vroot = of_find_node_by_name(NULL, "vdevice");
        if (node_vroot) {
                struct device_node *of_node;
                struct property *prop;

                for_each_child_of_node(node_vroot, of_node) {
                        prop = of_find_property(of_node, "ibm,my-dma-window",
                                                NULL);
                        if (prop)
                                count++;
                }
        }
        of_node_put(node_vroot);
        return count;
}

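/*
 * Illustrative example (assumed numbers, not from the original source):
 * if a device has 8KB of unused reserve entitlement
 * (entitled - allocated) and the excess pool has 16KB free, a 20KB
 * vio_cmo_alloc() request succeeds: the first 8KB is charged against
 * the device's reserve and the remaining 12KB against excess.free.
 * A 32KB request would fail with -ENOMEM and leave the counters
 * untouched.
 */
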
/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * made available.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t reserve_free = 0;
        size_t excess_free = 0;
        int ret = -ENOMEM;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Determine the amount of free entitlement available in reserve */
        if (viodev->cmo.entitled > viodev->cmo.allocated)
                reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

        /* If spare is not fulfilled, the excess pool cannot be used. */
        if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
                excess_free = vio_cmo.excess.free;

        /* The request can be satisfied */
        if ((reserve_free + excess_free) >= size) {
                vio_cmo.curr += size;
                if (vio_cmo.curr > vio_cmo.high)
                        vio_cmo.high = vio_cmo.curr;
                viodev->cmo.allocated += size;
                size -= min(reserve_free, size);
                vio_cmo.excess.free -= size;
                ret = 0;
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return ret;
}

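/*
 * Illustrative note (assumed numbers, not from the original source):
 * vio_cmo_dealloc() returns freed IO memory in a fixed order.  Suppose
 * the spare pool is 4KB short of VIO_CMO_MIN_ENT and a device frees
 * 16KB, 6KB of which had been drawn from the excess pool.  The first
 * 4KB of the excess portion tops up the spare, the rest of the excess
 * portion may grow the reserve toward the desired target, and whatever
 * remains is returned to the pool it came from.
 */
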
/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and then the remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t spare_needed = 0;
        size_t excess_freed = 0;
        size_t reserve_freed = size;
        size_t tmp;
        int balance = 0;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.curr -= size;

        /* Amount of memory freed from the excess pool */
        if (viodev->cmo.allocated > viodev->cmo.entitled) {
                excess_freed = min(reserve_freed, (viodev->cmo.allocated -
                                                   viodev->cmo.entitled));
                reserve_freed -= excess_freed;
        }

        /* Remove allocation from device */
        viodev->cmo.allocated -= (reserve_freed + excess_freed);

        /* Spare is a subset of the reserve pool, replenish it first. */
        spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

        /*
         * Replenish the spare in the reserve pool from the excess pool.
         * This moves entitlement into the reserve pool.
         */
        if (spare_needed && excess_freed) {
                tmp = min(excess_freed, spare_needed);
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                vio_cmo.spare += tmp;
                excess_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Replenish the spare in the reserve pool from the reserve pool.
         * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
         * if needed, and gives it to the spare pool. The amount of used
         * memory in this pool does not change.
         */
        if (spare_needed && reserve_freed) {
                tmp = min3(spare_needed, reserve_freed,
                           (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

                vio_cmo.spare += tmp;
                viodev->cmo.entitled -= tmp;
                reserve_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Increase the reserve pool until the desired allocation is met.
         * Move an allocation freed from the excess pool into the reserve
         * pool and schedule a balance operation.
         */
        if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
                tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                excess_freed -= tmp;
                balance = 1;
        }

        /* Return memory from the excess pool to that pool */
        if (excess_freed)
                vio_cmo.excess.free += excess_freed;

        if (balance)
                schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when the change cannot be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail, delta, tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Entitlement increases */
        if (new_entitlement > vio_cmo.entitled) {
                delta = new_entitlement - vio_cmo.entitled;

                /* Fulfill spare allocation */
                if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
                        tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        vio_cmo.reserve.size += tmp;
                        delta -= tmp;
                }

                /* Remaining new allocation goes to the excess pool */
                vio_cmo.entitled += delta;
                vio_cmo.excess.size += delta;
                vio_cmo.excess.free += delta;

                goto out;
        }

        /* Entitlement decreases */
        delta = vio_cmo.entitled - new_entitlement;
        avail = vio_cmo.excess.free;

        /*
         * Need to check how much unused entitlement each device can
         * sacrifice to fulfill the entitlement change.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                if (avail >= delta)
                        break;

                viodev = dev_ent->viodev;
                if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                        avail += viodev->cmo.entitled -
                                 max_t(size_t, viodev->cmo.allocated,
                                       VIO_CMO_MIN_ENT);
        }

        if (delta <= avail) {
                vio_cmo.entitled -= delta;

                /* Take entitlement from the excess pool first */
                tmp = min(vio_cmo.excess.free, delta);
                vio_cmo.excess.size -= tmp;
                vio_cmo.excess.free -= tmp;
                delta -= tmp;

                /*
                 * Remove all but VIO_CMO_MIN_ENT bytes from devices
                 * until the entitlement change is served
                 */
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        if (!delta)
                                break;

                        viodev = dev_ent->viodev;
                        tmp = 0;
                        if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                            (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                                tmp = viodev->cmo.entitled -
                                      max_t(size_t, viodev->cmo.allocated,
                                            VIO_CMO_MIN_ENT);
                        viodev->cmo.entitled -= min(tmp, delta);
                        delta -= min(tmp, delta);
                }
        } else {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return -ENOMEM;
        }

out:
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}

/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver.  The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
        struct vio_cmo *cmo;
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail = 0, level, chunk, need;
        int devcount = 0, fulfilled;

        cmo = container_of(work, struct vio_cmo, balance_q.work);

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Calculate minimum entitlement and fulfill spare */
        cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
        BUG_ON(cmo->min > cmo->entitled);
        cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
        cmo->min += cmo->spare;
        cmo->desired = cmo->min;

        /*
         * Determine how much entitlement is available and reset device
         * entitlements
         */
        avail = cmo->entitled - cmo->spare;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                devcount++;
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
                avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
        }

        /*
         * Having provided each device with the minimum entitlement, loop
         * over the devices portioning out the remaining entitlement
         * until there is nothing left.
         */
        level = VIO_CMO_MIN_ENT;
        while (avail) {
                fulfilled = 0;
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        viodev = dev_ent->viodev;

                        if (viodev->cmo.desired <= level) {
                                fulfilled++;
                                continue;
                        }

                        /*
                         * Give the device up to VIO_CMO_BALANCE_CHUNK
                         * bytes of entitlement, but do not exceed the
                         * desired level of entitlement for the device.
                         */
                        chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
                        chunk = min(chunk, (viodev->cmo.desired -
                                            viodev->cmo.entitled));
                        viodev->cmo.entitled += chunk;

                        /*
                         * If the memory for this entitlement increase was
                         * already allocated to the device it does not come
                         * from the available pool being portioned out.
                         */
                        need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
                               max(viodev->cmo.allocated, level);
                        avail -= need;
                }
                if (fulfilled == devcount)
                        break;
                level += VIO_CMO_BALANCE_CHUNK;
        }

        /* Calculate new reserve and excess pool sizes */
        cmo->reserve.size = cmo->min;
        cmo->excess.free = 0;
        cmo->excess.size = 0;
        need = 0;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                /* Calculated reserve size above the minimum entitlement */
                if (viodev->cmo.entitled)
                        cmo->reserve.size += (viodev->cmo.entitled -
                                              VIO_CMO_MIN_ENT);
                /* Calculated used excess entitlement */
                if (viodev->cmo.allocated > viodev->cmo.entitled)
                        need += viodev->cmo.allocated - viodev->cmo.entitled;
        }
        cmo->excess.size = cmo->entitled - cmo->reserve.size;
        cmo->excess.free = cmo->excess.size - need;

        cancel_delayed_work(to_delayed_work(work));
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

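/*
 * Note (descriptive, not from the original source): each DMA wrapper
 * below follows the same pattern: charge the device's CMO entitlement
 * with vio_cmo_alloc() for the page-rounded size, delegate the real
 * work to dma_iommu_ops, and refund the charge (while counting a
 * failure) if the underlying operation does not succeed.
 */
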
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t flag)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        void *ret;

        if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return NULL;
        }

        ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
        if (unlikely(ret == NULL)) {
                vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_handle)
{
        struct vio_dev *viodev = to_vio_dev(dev);

        dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);

        vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        dma_addr_t ret = DMA_ERROR_CODE;

        if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
        if (unlikely(dma_mapping_error(dev, ret))) {
                vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                     size_t size,
                                     enum dma_data_direction direction,
                                     struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);

        dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                                int nelems, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct scatterlist *sgl;
        int ret, count = 0;
        size_t alloc_size = 0;

        for (sgl = sglist; count < nelems; count++, sgl++)
                alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);

        if (vio_cmo_alloc(viodev, alloc_size)) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return 0;
        }

        ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);

        if (unlikely(!ret)) {
                vio_cmo_dealloc(viodev, alloc_size);
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        for (sgl = sglist, count = 0; count < ret; count++, sgl++)
                alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
        if (alloc_size)
                vio_cmo_dealloc(viodev, alloc_size);

        return ret;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction,
                                   struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct scatterlist *sgl;
        size_t alloc_size = 0;
        int count = 0;

        for (sgl = sglist; count < nelems; count++, sgl++)
                alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);

        dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

        vio_cmo_dealloc(viodev, alloc_size);
}

static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
{
        return dma_iommu_ops.dma_supported(dev, mask);
}

struct dma_map_ops vio_dma_mapping_ops = {
        .alloc_coherent = vio_dma_iommu_alloc_coherent,
        .free_coherent  = vio_dma_iommu_free_coherent,
        .map_sg         = vio_dma_iommu_map_sg,
        .unmap_sg       = vio_dma_iommu_unmap_sg,
        .map_page       = vio_dma_iommu_map_page,
        .unmap_page     = vio_dma_iommu_unmap_page,
        .dma_supported  = vio_dma_iommu_dma_supported,
};

/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
        unsigned long flags;
        struct vio_cmo_dev_entry *dev_ent;
        int found = 0;

        if (!firmware_has_feature(FW_FEATURE_CMO))
                return;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (desired < VIO_CMO_MIN_ENT)
                desired = VIO_CMO_MIN_ENT;

        /*
         * Changes will not be made for devices not in the device list.
         * If a device is not in the device list, then no driver is loaded
         * for it and it cannot receive entitlement.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        found = 1;
                        break;
                }
        if (!found) {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return;
        }

        /* Increase/decrease in desired device entitlement */
        if (desired >= viodev->cmo.desired) {
                /* Just bump the bus and device values prior to a balance */
                vio_cmo.desired += desired - viodev->cmo.desired;
                viodev->cmo.desired = desired;
        } else {
                /* Decrease bus and device values for desired entitlement */
                vio_cmo.desired -= viodev->cmo.desired - desired;
                viodev->cmo.desired = desired;
                /*
                 * If less entitlement is desired than current entitlement,
                 * move any reserve memory in the change region to the
                 * excess pool.
                 */
                if (viodev->cmo.entitled > desired) {
                        vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
                        vio_cmo.excess.size += viodev->cmo.entitled - desired;
                        /*
                         * If entitlement moving from the reserve pool to the
                         * excess pool is currently unused, add to the excess
                         * free counter.
                         */
                        if (viodev->cmo.allocated < viodev->cmo.entitled)
                                vio_cmo.excess.free += viodev->cmo.entitled -
                                        max(viodev->cmo.allocated, desired);
                        viodev->cmo.entitled = desired;
                }
        }
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev: pointer to struct vio_dev for device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        struct device *dev = &viodev->dev;
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        unsigned long flags;
        size_t size;

        /*
         * Check to see that device has a DMA window and configure
         * entitlement for the device.
         */
        if (of_get_property(viodev->dev.of_node,
                            "ibm,my-dma-window", NULL)) {
                /* Check that the driver is CMO enabled and get desired DMA */
                if (!viodrv->get_desired_dma) {
                        dev_err(dev, "%s: device driver does not support CMO\n",
                                __func__);
                        return -EINVAL;
                }

                viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
                if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
                        viodev->cmo.desired = VIO_CMO_MIN_ENT;
                size = VIO_CMO_MIN_ENT;

                dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
                                  GFP_KERNEL);
                if (!dev_ent)
                        return -ENOMEM;

                dev_ent->viodev = viodev;
                spin_lock_irqsave(&vio_cmo.lock, flags);
                list_add(&dev_ent->list, &vio_cmo.device_list);
        } else {
                viodev->cmo.desired = 0;
                size = 0;
                spin_lock_irqsave(&vio_cmo.lock, flags);
        }

        /*
         * If the needs for vio_cmo.min have not changed since they
         * were last set, the number of devices in the OF tree has
         * been constant and the IO memory for this is already in
         * the reserve pool.
         */
        if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
                            VIO_CMO_MIN_ENT)) {
                /* Update desired entitlement if the device requires it */
                if (size)
                        vio_cmo.desired += (viodev->cmo.desired -
                                            VIO_CMO_MIN_ENT);
        } else {
                size_t tmp;

                tmp = vio_cmo.spare + vio_cmo.excess.free;
                if (tmp < size) {
                        /*
                         * tmp already includes the spare, so report it
                         * directly rather than adding the spare again.
                         */
                        dev_err(dev, "%s: insufficient free "
                                "entitlement to add device. "
                                "Need %lu, have %lu\n", __func__,
                                size, tmp);
                        spin_unlock_irqrestore(&vio_cmo.lock, flags);
                        return -ENOMEM;
                }

                /* Use excess pool first to fulfill request */
                tmp = min(size, vio_cmo.excess.free);
                vio_cmo.excess.free -= tmp;
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;

                /* Use spare if excess pool was insufficient */
                vio_cmo.spare -= size - tmp;

                /* Update bus accounting */
                vio_cmo.min += size;
                vio_cmo.desired += viodev->cmo.desired;
        }
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}

/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev: pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (viodev->cmo.allocated) {
                dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
                        "allocated after remove operation.\n",
                        __func__, viodev->cmo.allocated);
                BUG();
        }

        /*
         * Remove the device from the device list being maintained for
         * CMO enabled devices.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        list_del(&dev_ent->list);
                        kfree(dev_ent);
                        break;
                }

        /*
         * Devices may not require any entitlement and they do not need
         * to be processed.  Otherwise, return the device's entitlement
         * back to the pools.
         */
        if (viodev->cmo.entitled) {
                /*
                 * This device has not yet left the OF tree, so its
                 * minimum entitlement remains in vio_cmo.min and
                 * vio_cmo.desired
                 */
                vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

                /*
                 * Save min allocation for device in reserve as long
                 * as it exists in OF tree as determined by later
                 * balance operation
                 */
                viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

                /* Replenish spare from freed reserve pool */
                if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
                        tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
                                                         vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        viodev->cmo.entitled -= tmp;
                }

                /* Remaining reserve goes to excess pool */
                vio_cmo.excess.size += viodev->cmo.entitled;
                vio_cmo.excess.free += viodev->cmo.entitled;
                vio_cmo.reserve.size -= viodev->cmo.entitled;

                /*
                 * Until the device is removed it will keep a
                 * minimum entitlement; this guarantees that a
                 * module unload/load will succeed.
                 */
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                viodev->cmo.desired = VIO_CMO_MIN_ENT;
                atomic_set(&viodev->cmo.allocs_failed, 0);
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
        set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}

/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
        struct hvcall_mpp_data mpp_data;
        int err;

        memset(&vio_cmo, 0, sizeof(struct vio_cmo));
        spin_lock_init(&vio_cmo.lock);
        INIT_LIST_HEAD(&vio_cmo.device_list);
        INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

        /* Get current system entitlement */
        err = h_get_mpp(&mpp_data);

        /*
         * On failure, continue with entitlement set to 0, will panic()
         * later when spare is reserved.
         */
        if (err != H_SUCCESS) {
                printk(KERN_ERR "%s: unable to determine system IO "
                       "entitlement. (%d)\n", __func__, err);
                vio_cmo.entitled = 0;
        } else {
                vio_cmo.entitled = mpp_data.entitled_mem;
        }

        /* Set reservation and check against entitlement */
        vio_cmo.spare = VIO_CMO_MIN_ENT;
        vio_cmo.reserve.size = vio_cmo.spare;
        vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
                                 VIO_CMO_MIN_ENT);
        if (vio_cmo.reserve.size > vio_cmo.entitled) {
                printk(KERN_ERR "%s: insufficient system entitlement\n",
                       __func__);
                panic("%s: Insufficient system entitlement", __func__);
        }

        /* Set the remaining accounting variables */
        vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
        vio_cmo.excess.free = vio_cmo.excess.size;
        vio_cmo.min = vio_cmo.reserve.size;
        vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)                                        \
static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
                                        struct device_attribute *attr,  \
                                        char *buf)                      \
{                                                                       \
        return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}

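/*
 * For reference (derived from the macro above, not new functionality):
 * viodev_cmo_rd_attr(desired) expands to a read-only show routine,
 * viodev_cmo_desired_show(), that prints to_vio_dev(dev)->cmo.desired.
 */
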
static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        atomic_set(&viodev->cmo.allocs_failed, 0);
        return count;
}

static ssize_t viodev_cmo_desired_set(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        size_t new_desired;
        int ret;

        ret = strict_strtoul(buf, 10, &new_desired);
        if (ret)
                return ret;

        vio_cmo_set_dev_desired(viodev, new_desired);
        return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf);
static struct device_attribute vio_cmo_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(devspec),
        __ATTR_RO(modalias),
        __ATTR(cmo_desired,       S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_desired_show, viodev_cmo_desired_set),
        __ATTR(cmo_entitled,      S_IRUGO, viodev_cmo_entitled_show,      NULL),
        __ATTR(cmo_allocated,     S_IRUGO, viodev_cmo_allocated_show,     NULL),
        __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
        __ATTR_NULL
};

/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)                                        \
static ssize_t                                                          \
viobus_cmo_##name##_show(struct bus_type *bt, char *buf)                \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name);                     \
}

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf)     \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}

static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
                                     size_t count)
{
        unsigned long flags;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.high = vio_cmo.curr;
        spin_unlock_irqrestore(&vio_cmo.lock, flags);

        return count;
}

viobus_cmo_rd_attr(entitled);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_rd_attr(high);

static struct bus_attribute vio_cmo_bus_attrs[] = {
        __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
        __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
        __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
        __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
        __ATTR(cmo_spare,   S_IRUGO, viobus_cmo_spare_show,   NULL),
        __ATTR(cmo_min,     S_IRUGO, viobus_cmo_min_show,     NULL),
        __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
        __ATTR(cmo_curr,    S_IRUGO, viobus_cmo_curr_show,    NULL),
        __ATTR(cmo_high,    S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viobus_cmo_high_show, viobus_cmo_high_reset),
        __ATTR_NULL
};

static void vio_cmo_sysfs_init(void)
{
        vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
        vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
}
#else /* CONFIG_PPC_SMLPAR */
/* Dummy functions for platforms without CMO support (e.g. iSeries) */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) {}
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);

static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
        const unsigned char *dma_window;
        struct iommu_table *tbl;
        unsigned long offset, size;

        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return vio_build_iommu_table_iseries(dev);

        dma_window = of_get_property(dev->dev.of_node,
                                     "ibm,my-dma-window", NULL);
        if (!dma_window)
                return NULL;

        tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return NULL;

        of_parse_dma_window(dev->dev.of_node, dma_window,
                            &tbl->it_index, &offset, &size);

        /* TCE table size - measured in tce entries */
        tbl->it_size = size >> IOMMU_PAGE_SHIFT;
        /* offset for VIO should always be 0 */
        tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
        tbl->it_busno = 0;
        tbl->it_type = TCE_VB;
        tbl->it_blocksize = 16;

        return iommu_init_table(tbl, -1);
}

/**
 * vio_match_device - Tell if a VIO device has a matching
 *			VIO device id structure.
 * @ids:	array of VIO device id structures to search in
 * @dev:	the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices. Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
                const struct vio_device_id *ids, const struct vio_dev *dev)
{
        while (ids->type[0] != '\0') {
                if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
                    of_device_is_compatible(dev->dev.of_node,
                                            ids->compat))
                        return ids;
                ids++;
        }
        return NULL;
}

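/*
 * Illustrative example (hypothetical entries, not from this file): a
 * driver's id table pairs a device_type string with a "compatible"
 * value, terminated by an empty type, e.g.:
 *
 *	static struct vio_device_id example_ids[] = {
 *		{ "network", "IBM,l-lan" },
 *		{ "", "" },
 *	};
 */
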
/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        const struct vio_device_id *id;
        int error = -ENODEV;

        if (!viodrv->probe)
                return error;

        id = vio_match_device(viodrv->id_table, viodev);
        if (id) {
                memset(&viodev->cmo, 0, sizeof(viodev->cmo));
                if (firmware_has_feature(FW_FEATURE_CMO)) {
                        error = vio_cmo_bus_probe(viodev);
                        if (error)
                                return error;
                }
                error = viodrv->probe(viodev, id);
                if (error && firmware_has_feature(FW_FEATURE_CMO))
                        vio_cmo_bus_remove(viodev);
        }

        return error;
}

/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        struct device *devptr;
        int ret = 1;

        /*
         * Hold a reference to the device after the remove function is called
         * to allow for CMO accounting cleanup for the device.
         */
        devptr = get_device(dev);

        if (viodrv->remove)
                ret = viodrv->remove(viodev);

        if (!ret && firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_remove(viodev);

        put_device(devptr);
        return ret;
}
|  | 1160 |  | 
|  | 1161 | /** | 
|  | 1162 | * vio_register_driver: - Register a new vio driver | 
|  | 1163 | * @viodrv:	The vio_driver structure to be registered. | 
|  | 1164 | */ | 
|  | 1165 | int vio_register_driver(struct vio_driver *viodrv) | 
|  | 1166 | { | 
| Harvey Harrison | e48b1b4 | 2008-03-29 08:21:07 +1100 | [diff] [blame] | 1167 | printk(KERN_DEBUG "%s: driver %s registering\n", __func__, | 
| Stephen Rothwell | 6fdf539 | 2005-10-24 14:53:21 +1000 | [diff] [blame] | 1168 | viodrv->driver.name); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 |  | 
|  | 1170 | /* fill in 'struct driver' fields */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1171 | viodrv->driver.bus = &vio_bus_type; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1172 |  | 
|  | 1173 | return driver_register(&viodrv->driver); | 
|  | 1174 | } | 
|  | 1175 | EXPORT_SYMBOL(vio_register_driver); | 
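|  |  |  | 
|  |  | /* | 
|  |  | * Example usage (a hedged sketch; all names are hypothetical): a client | 
|  |  | * driver fills in its callbacks and an id_table like the example_ids | 
|  |  | * sketch near vio_match_device() above, then registers itself: | 
|  |  | * | 
|  |  | *	static int example_probe(struct vio_dev *vdev, | 
|  |  | *				 const struct vio_device_id *id) | 
|  |  | *	{ | 
|  |  | *		dev_info(&vdev->dev, "bound to unit 0x%x\n", | 
|  |  | *			 vdev->unit_address); | 
|  |  | *		return 0; | 
|  |  | *	} | 
|  |  | * | 
|  |  | *	static int example_remove(struct vio_dev *vdev) | 
|  |  | *	{ | 
|  |  | *		return 0; | 
|  |  | *	} | 
|  |  | * | 
|  |  | *	static struct vio_driver example_driver = { | 
|  |  | *		.id_table	= example_ids, | 
|  |  | *		.probe		= example_probe, | 
|  |  | *		.remove		= example_remove, | 
|  |  | *		.driver		= { | 
|  |  | *			.name	= "example", | 
|  |  | *			.owner	= THIS_MODULE, | 
|  |  | *		}, | 
|  |  | *	}; | 
|  |  | * | 
|  |  | * Only .driver.bus is filled in by vio_register_driver() itself. | 
|  |  | */ | 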
|  | 1176 |  | 
|  | 1177 | /** | 
|  | 1178 | * vio_unregister_driver - Remove registration of vio driver. | 
|  | 1179 | * @viodrv:	The vio_driver struct to be removed from registration | 
|  | 1180 | */ | 
|  | 1181 | void vio_unregister_driver(struct vio_driver *viodrv) | 
|  | 1182 | { | 
|  | 1183 | driver_unregister(&viodrv->driver); | 
|  | 1184 | } | 
|  | 1185 | EXPORT_SYMBOL(vio_unregister_driver); | 
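|  |  |  | 
|  |  | /* | 
|  |  | * Example usage (sketch, continuing the hypothetical driver above): | 
|  |  | * registration and unregistration are normally paired in the module | 
|  |  | * init/exit hooks: | 
|  |  | * | 
|  |  | *	static int __init example_init(void) | 
|  |  | *	{ | 
|  |  | *		return vio_register_driver(&example_driver); | 
|  |  | *	} | 
|  |  | *	module_init(example_init); | 
|  |  | * | 
|  |  | *	static void __exit example_exit(void) | 
|  |  | *	{ | 
|  |  | *		vio_unregister_driver(&example_driver); | 
|  |  | *	} | 
|  |  | *	module_exit(example_exit); | 
|  |  | */ | 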
|  | 1186 |  | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1187 | /* vio_dev refcount hit 0 */ | 
|  | 1188 | static void __devinit vio_dev_release(struct device *dev) | 
|  | 1189 | { | 
| Nishanth Aravamudan | 45848e0 | 2010-09-15 08:05:48 +0000 | [diff] [blame] | 1190 | struct iommu_table *tbl = get_iommu_table_base(dev); | 
|  | 1191 |  | 
|  | 1192 | /* iSeries uses a common table for all vio devices */ | 
|  | 1193 | if (!firmware_has_feature(FW_FEATURE_ISERIES) && tbl) | 
|  | 1194 | iommu_free_table(tbl, dev->of_node ? | 
|  | 1195 | dev->of_node->full_name : dev_name(dev)); | 
| Grant Likely | 58f9b0b | 2010-04-13 16:12:56 -0700 | [diff] [blame] | 1196 | of_node_put(dev->of_node); | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1197 | kfree(to_vio_dev(dev)); | 
|  | 1198 | } | 
|  | 1199 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | /** | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1201 | * vio_register_device_node: - Register a new vio device. | 
|  | 1202 | * @of_node:	The OF node for this device. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1203 | * | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1204 | * Creates and initializes a vio_dev structure from the data in | 
| Benjamin Herrenschmidt | 12d04ee | 2006-11-11 17:25:02 +1100 | [diff] [blame] | 1205 | * of_node and adds it to the list of virtual devices. | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1206 | * Returns a pointer to the created vio_dev or NULL if the node is | 
|  | 1207 | * missing a 'device_type' or 'reg' property, or if allocation or registration fails. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1208 | */ | 
| Stephen Rothwell | de7d812 | 2008-02-05 14:15:12 +1100 | [diff] [blame] | 1209 | struct vio_dev *vio_register_device_node(struct device_node *of_node) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1210 | { | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1211 | struct vio_dev *viodev; | 
| Jeremy Kerr | a7f67bd | 2006-07-12 15:35:54 +1000 | [diff] [blame] | 1212 | const unsigned int *unit_address; | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1213 |  | 
|  | 1214 | /* we need the 'device_type' property in order to match with drivers */ | 
|  | 1215 | if (of_node->type == NULL) { | 
|  | 1216 | printk(KERN_WARNING "%s: node %s missing 'device_type'\n", | 
| Harvey Harrison | e48b1b4 | 2008-03-29 08:21:07 +1100 | [diff] [blame] | 1217 | __func__, | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1218 | of_node->name ? of_node->name : "<unknown>"); | 
|  | 1219 | return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1220 | } | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1221 |  | 
| Stephen Rothwell | e2eb639 | 2007-04-03 22:26:41 +1000 | [diff] [blame] | 1222 | unit_address = of_get_property(of_node, "reg", NULL); | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1223 | if (unit_address == NULL) { | 
|  | 1224 | printk(KERN_WARNING "%s: node %s missing 'reg'\n", | 
| Harvey Harrison | e48b1b4 | 2008-03-29 08:21:07 +1100 | [diff] [blame] | 1225 | __func__, | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1226 | of_node->name ? of_node->name : "<unknown>"); | 
|  | 1227 | return NULL; | 
|  | 1228 | } | 
|  | 1229 |  | 
|  | 1230 | /* allocate a vio_dev for this node */ | 
|  | 1231 | viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL); | 
|  | 1232 | if (viodev == NULL) | 
|  | 1233 | return NULL; | 
|  | 1234 |  | 
| Benjamin Herrenschmidt | 0ebfff1 | 2006-07-03 21:36:01 +1000 | [diff] [blame] | 1235 | viodev->irq = irq_of_parse_and_map(of_node, 0); | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1236 |  | 
| Kay Sievers | aab0d37 | 2008-12-04 10:02:56 -0800 | [diff] [blame] | 1237 | dev_set_name(&viodev->dev, "%x", *unit_address); | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1238 | viodev->name = of_node->name; | 
|  | 1239 | viodev->type = of_node->type; | 
|  | 1240 | viodev->unit_address = *unit_address; | 
|  | 1241 | if (firmware_has_feature(FW_FEATURE_ISERIES)) { | 
| Stephen Rothwell | e2eb639 | 2007-04-03 22:26:41 +1000 | [diff] [blame] | 1242 | unit_address = of_get_property(of_node, | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1243 | "linux,unit_address", NULL); | 
|  | 1244 | if (unit_address != NULL) | 
|  | 1245 | viodev->unit_address = *unit_address; | 
|  | 1246 | } | 
| Grant Likely | d706c1b | 2010-04-13 16:12:28 -0700 | [diff] [blame] | 1247 | viodev->dev.of_node = of_node_get(of_node); | 
| Robert Jennings | a90ab95 | 2008-07-24 04:31:33 +1000 | [diff] [blame] | 1248 |  | 
|  | 1249 | if (firmware_has_feature(FW_FEATURE_CMO)) | 
|  | 1250 | vio_cmo_set_dma_ops(viodev); | 
|  | 1251 | else | 
| Nishanth Aravamudan | 6d283d7 | 2010-10-18 07:26:59 +0000 | [diff] [blame] | 1252 | set_dma_ops(&viodev->dev, &dma_iommu_ops); | 
| Becky Bruce | 738ef42 | 2009-09-21 08:26:35 +0000 | [diff] [blame] | 1253 | set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev)); | 
| Becky Bruce | 8fae035 | 2008-09-08 09:09:54 +0000 | [diff] [blame] | 1254 | set_dev_node(&viodev->dev, of_node_to_nid(of_node)); | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1255 |  | 
|  | 1256 | /* init generic 'struct device' fields: */ | 
|  | 1257 | viodev->dev.parent = &vio_bus_device.dev; | 
|  | 1258 | viodev->dev.bus = &vio_bus_type; | 
|  | 1259 | viodev->dev.release = vio_dev_release; | 
| Nishanth Aravamudan | b3c7385 | 2010-10-18 07:27:04 +0000 | [diff] [blame] | 1260 | /* Needed to ensure proper operation of coherent allocations | 
|  | 1261 | * later, in case the driver doesn't set it explicitly. */ | 
|  | 1262 | dma_set_mask(&viodev->dev, DMA_BIT_MASK(64)); | 
|  | 1263 | dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64)); | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1264 |  | 
|  | 1265 | /* register with generic device framework */ | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1266 | if (device_register(&viodev->dev)) { | 
|  | 1267 | printk(KERN_ERR "%s: failed to register device %s\n", | 
| Kay Sievers | aab0d37 | 2008-12-04 10:02:56 -0800 | [diff] [blame] | 1268 | __func__, dev_name(&viodev->dev)); | 
| Nishanth Aravamudan | edea8f6 | 2010-09-15 08:05:47 +0000 | [diff] [blame] | 1269 | put_device(&viodev->dev); | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1270 | return NULL; | 
|  | 1271 | } | 
|  | 1272 |  | 
|  | 1273 | return viodev; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1274 | } | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1275 | EXPORT_SYMBOL(vio_register_device_node); | 
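|  |  |  | 
|  |  | /* | 
|  |  | * Example usage (a sketch; the device-tree path is illustrative): most | 
|  |  | * callers are the boot-time scan in vio_bus_init() below, but a node | 
|  |  | * appearing later can be registered the same way, provided it carries | 
|  |  | * both the 'device_type' and 'reg' properties: | 
|  |  | * | 
|  |  | *	struct device_node *dn; | 
|  |  | *	struct vio_dev *vdev; | 
|  |  | * | 
|  |  | *	dn = of_find_node_by_path("/vdevice/l-lan@30000002"); | 
|  |  | *	if (dn) { | 
|  |  | *		vdev = vio_register_device_node(dn); | 
|  |  | *		of_node_put(dn);	(the vio_dev takes its own reference) | 
|  |  | *	} | 
|  |  | */ | 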
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1276 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1277 | /** | 
|  | 1278 | * vio_bus_init: - Initialize the virtual IO bus | 
|  | 1279 | */ | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1280 | static int __init vio_bus_init(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1281 | { | 
|  | 1282 | int err; | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1283 | struct device_node *node_vroot; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1284 |  | 
| Robert Jennings | a90ab95 | 2008-07-24 04:31:33 +1000 | [diff] [blame] | 1285 | if (firmware_has_feature(FW_FEATURE_CMO)) | 
|  | 1286 | vio_cmo_sysfs_init(); | 
|  | 1287 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1288 | err = bus_register(&vio_bus_type); | 
|  | 1289 | if (err) { | 
|  | 1290 | printk(KERN_ERR "failed to register VIO bus\n"); | 
|  | 1291 | return err; | 
|  | 1292 | } | 
|  | 1293 |  | 
| Stephen Rothwell | 5c0b4b8 | 2005-08-17 16:37:35 +1000 | [diff] [blame] | 1294 | /* | 
|  | 1295 | * The fake parent of all vio devices, just to give us | 
| Stephen Rothwell | 3e494c8 | 2005-07-12 17:40:17 +1000 | [diff] [blame] | 1296 | * a nice directory in sysfs | 
|  | 1297 | */ | 
| Stephen Rothwell | ac5b33c | 2005-06-21 17:15:54 -0700 | [diff] [blame] | 1298 | err = device_register(&vio_bus_device.dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | if (err) { | 
| Stephen Rothwell | 3e494c8 | 2005-07-12 17:40:17 +1000 | [diff] [blame] | 1300 | printk(KERN_WARNING "%s: device_register returned %i\n", | 
| Harvey Harrison | e48b1b4 | 2008-03-29 08:21:07 +1100 | [diff] [blame] | 1301 | __func__, err); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | return err; | 
|  | 1303 | } | 
|  | 1304 |  | 
| Robert Jennings | a90ab95 | 2008-07-24 04:31:33 +1000 | [diff] [blame] | 1305 | if (firmware_has_feature(FW_FEATURE_CMO)) | 
|  | 1306 | vio_cmo_bus_init(); | 
|  | 1307 |  | 
| Stephen Rothwell | 30686ba | 2007-04-24 13:53:04 +1000 | [diff] [blame] | 1308 | node_vroot = of_find_node_by_name(NULL, "vdevice"); | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1309 | if (node_vroot) { | 
|  | 1310 | struct device_node *of_node; | 
|  | 1311 |  | 
|  | 1312 | /* | 
|  | 1313 | * Create struct vio_devices for each virtual device in | 
|  | 1314 | * the device tree. Drivers will associate with them later. | 
|  | 1315 | */ | 
|  | 1316 | for (of_node = node_vroot->child; of_node != NULL; | 
| Stephen Rothwell | c546726 | 2007-09-21 14:29:28 +1000 | [diff] [blame] | 1317 | of_node = of_node->sibling) | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1318 | vio_register_device_node(of_node); | 
| Stephen Rothwell | 30686ba | 2007-04-24 13:53:04 +1000 | [diff] [blame] | 1319 | of_node_put(node_vroot); | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1320 | } | 
|  | 1321 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1322 | return 0; | 
|  | 1323 | } | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1324 | __initcall(vio_bus_init); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1325 |  | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1326 | static ssize_t name_show(struct device *dev, | 
| Stephen Rothwell | 5c0b4b8 | 2005-08-17 16:37:35 +1000 | [diff] [blame] | 1327 | struct device_attribute *attr, char *buf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1328 | { | 
|  | 1329 | return sprintf(buf, "%s\n", to_vio_dev(dev)->name); | 
|  | 1330 | } | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1331 |  | 
|  | 1332 | static ssize_t devspec_show(struct device *dev, | 
|  | 1333 | struct device_attribute *attr, char *buf) | 
|  | 1334 | { | 
| Grant Likely | 58f9b0b | 2010-04-13 16:12:56 -0700 | [diff] [blame] | 1335 | struct device_node *of_node = dev->of_node; | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1336 |  | 
|  | 1337 | return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none"); | 
|  | 1338 | } | 
|  | 1339 |  | 
| Benjamin Herrenschmidt | 578b7cd | 2010-04-07 14:44:28 +1000 | [diff] [blame] | 1340 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | 
|  | 1341 | char *buf) | 
|  | 1342 | { | 
|  | 1343 | const struct vio_dev *vio_dev = to_vio_dev(dev); | 
|  | 1344 | struct device_node *dn; | 
|  | 1345 | const char *cp; | 
|  | 1346 |  | 
| Grant Likely | cf9b59e | 2010-05-22 00:36:56 -0600 | [diff] [blame] | 1347 | dn = dev->of_node; | 
| Benjamin Herrenschmidt | 578b7cd | 2010-04-07 14:44:28 +1000 | [diff] [blame] | 1348 | if (!dn) | 
|  | 1349 | return -ENODEV; | 
|  | 1350 | cp = of_get_property(dn, "compatible", NULL); | 
|  | 1351 | if (!cp) | 
|  | 1352 | return -ENODEV; | 
|  | 1353 |  | 
|  | 1354 | return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); | 
|  | 1355 | } | 
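|  |  |  | 
|  |  | /* | 
|  |  | * For example (illustrative values), a virtual ethernet device with | 
|  |  | * type "network" and compatible "IBM,l-lan" reads back as: | 
|  |  | * | 
|  |  | *	vio:TnetworkSIBM,l-lan | 
|  |  | * | 
|  |  | * the same string vio_hotplug() below emits as MODALIAS=, so module | 
|  |  | * autoloading and the sysfs attribute agree on the alias format. | 
|  |  | */ | 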
|  | 1356 |  | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1357 | static struct device_attribute vio_dev_attrs[] = { | 
|  | 1358 | __ATTR_RO(name), | 
|  | 1359 | __ATTR_RO(devspec), | 
| Benjamin Herrenschmidt | 578b7cd | 2010-04-07 14:44:28 +1000 | [diff] [blame] | 1360 | __ATTR_RO(modalias), | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1361 | __ATTR_NULL | 
|  | 1362 | }; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1363 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1364 | void __devinit vio_unregister_device(struct vio_dev *viodev) | 
|  | 1365 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1366 | device_unregister(&viodev->dev); | 
|  | 1367 | } | 
|  | 1368 | EXPORT_SYMBOL(vio_unregister_device); | 
|  | 1369 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1370 | static int vio_bus_match(struct device *dev, struct device_driver *drv) | 
|  | 1371 | { | 
|  | 1372 | const struct vio_dev *vio_dev = to_vio_dev(dev); | 
|  | 1373 | struct vio_driver *vio_drv = to_vio_driver(drv); | 
|  | 1374 | const struct vio_device_id *ids = vio_drv->id_table; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 |  | 
| Stephen Rothwell | 5c0b4b8 | 2005-08-17 16:37:35 +1000 | [diff] [blame] | 1376 | return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | } | 
|  | 1378 |  | 
| Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1379 | static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env) | 
| Olaf Hering | 143dcec | 2005-11-08 21:34:36 -0800 | [diff] [blame] | 1380 | { | 
|  | 1381 | const struct vio_dev *vio_dev = to_vio_dev(dev); | 
| Benjamin Herrenschmidt | 12d04ee | 2006-11-11 17:25:02 +1100 | [diff] [blame] | 1382 | struct device_node *dn; | 
| Jeremy Kerr | a7f67bd | 2006-07-12 15:35:54 +1000 | [diff] [blame] | 1383 | const char *cp; | 
| Olaf Hering | 143dcec | 2005-11-08 21:34:36 -0800 | [diff] [blame] | 1384 |  | 
| Grant Likely | 58f9b0b | 2010-04-13 16:12:56 -0700 | [diff] [blame] | 1385 | dn = dev->of_node; | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1386 | if (!dn) | 
| Olaf Hering | 143dcec | 2005-11-08 21:34:36 -0800 | [diff] [blame] | 1387 | return -ENODEV; | 
| Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1388 | cp = of_get_property(dn, "compatible", NULL); | 
| Olaf Hering | 143dcec | 2005-11-08 21:34:36 -0800 | [diff] [blame] | 1389 | if (!cp) | 
|  | 1390 | return -ENODEV; | 
|  | 1391 |  | 
| Kay Sievers | 7eff2e7 | 2007-08-14 15:15:12 +0200 | [diff] [blame] | 1392 | add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp); | 
| Olaf Hering | 143dcec | 2005-11-08 21:34:36 -0800 | [diff] [blame] | 1393 | return 0; | 
|  | 1394 | } | 
|  | 1395 |  | 
| Stephen Rothwell | 6fccab2 | 2007-09-21 14:32:05 +1000 | [diff] [blame] | 1396 | static struct bus_type vio_bus_type = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1397 | .name = "vio", | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1398 | .dev_attrs = vio_dev_attrs, | 
| Kay Sievers | 312c004 | 2005-11-16 09:00:00 +0100 | [diff] [blame] | 1399 | .uevent = vio_hotplug, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1400 | .match = vio_bus_match, | 
| Russell King | 2f53a80 | 2006-01-05 14:36:47 +0000 | [diff] [blame] | 1401 | .probe = vio_bus_probe, | 
|  | 1402 | .remove = vio_bus_remove, | 
| Brian King | a1263c7 | 2010-05-14 12:04:41 +0000 | [diff] [blame] | 1403 | .pm = GENERIC_SUBSYS_PM_OPS, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1404 | }; | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1405 |  | 
|  | 1406 | /** | 
|  | 1407 | * vio_get_attribute: - get attribute for virtual device | 
|  | 1408 | * @vdev:	The vio device whose property is to be returned. | 
|  | 1409 | * @which:	The property/attribute to be extracted. | 
|  | 1410 | * @length:	Pointer to length of returned data (unused if NULL). | 
|  | 1411 | * | 
| Stephen Rothwell | e2eb639 | 2007-04-03 22:26:41 +1000 | [diff] [blame] | 1412 | * Calls prom.c's of_get_property() to return the value of the | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1413 | * attribute specified by @which. | 
|  | 1414 | */ | 
|  | 1415 | const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) | 
|  | 1416 | { | 
| Grant Likely | 58f9b0b | 2010-04-13 16:12:56 -0700 | [diff] [blame] | 1417 | return of_get_property(vdev->dev.of_node, which, length); | 
| Stephen Rothwell | e10fa77 | 2006-04-27 17:18:21 +1000 | [diff] [blame] | 1418 | } | 
|  | 1419 | EXPORT_SYMBOL(vio_get_attribute); | 
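|  |  |  | 
|  |  | /* | 
|  |  | * Example usage (a sketch; the property name is one a virtual ethernet | 
|  |  | * driver would read, shown purely for illustration): | 
|  |  | * | 
|  |  | *	const unsigned char *mac; | 
|  |  | * | 
|  |  | *	mac = vio_get_attribute(vdev, "local-mac-address", NULL); | 
|  |  | *	if (!mac) | 
|  |  | *		return -EINVAL; | 
|  |  | */ | 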
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1420 |  | 
|  | 1421 | #ifdef CONFIG_PPC_PSERIES | 
|  | 1422 | /* vio_find_name() - internal because only vio.c knows how we formatted the | 
|  | 1423 | * kobject name | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1424 | */ | 
| Paul Mackerras | c847c85 | 2008-01-27 11:45:30 +1100 | [diff] [blame] | 1425 | static struct vio_dev *vio_find_name(const char *name) | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1426 | { | 
| Paul Mackerras | c847c85 | 2008-01-27 11:45:30 +1100 | [diff] [blame] | 1427 | struct device *found; | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1428 |  | 
| Paul Mackerras | c847c85 | 2008-01-27 11:45:30 +1100 | [diff] [blame] | 1429 | found = bus_find_device_by_name(&vio_bus_type, NULL, name); | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1430 | if (!found) | 
|  | 1431 | return NULL; | 
|  | 1432 |  | 
| Paul Mackerras | c847c85 | 2008-01-27 11:45:30 +1100 | [diff] [blame] | 1433 | return to_vio_dev(found); | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1434 | } | 
|  | 1435 |  | 
|  | 1436 | /** | 
|  | 1437 | * vio_find_node - find an already-registered vio_dev | 
|  | 1438 | * @vnode: device_node of the virtual device we're looking for | 
|  | 1439 | */ | 
|  | 1440 | struct vio_dev *vio_find_node(struct device_node *vnode) | 
|  | 1441 | { | 
| Jeremy Kerr | a7f67bd | 2006-07-12 15:35:54 +1000 | [diff] [blame] | 1442 | const uint32_t *unit_address; | 
| Kay Sievers | aab0d37 | 2008-12-04 10:02:56 -0800 | [diff] [blame] | 1443 | char kobj_name[20]; | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1444 |  | 
|  | 1445 | /* construct the kobject name from the device node */ | 
| Stephen Rothwell | e2eb639 | 2007-04-03 22:26:41 +1000 | [diff] [blame] | 1446 | unit_address = of_get_property(vnode, "reg", NULL); | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1447 | if (!unit_address) | 
|  | 1448 | return NULL; | 
| Kay Sievers | aab0d37 | 2008-12-04 10:02:56 -0800 | [diff] [blame] | 1449 | snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address); | 
| Stephen Rothwell | c7f0e8c | 2006-04-27 17:23:32 +1000 | [diff] [blame] | 1450 |  | 
|  | 1451 | return vio_find_name(kobj_name); | 
|  | 1452 | } | 
|  | 1453 | EXPORT_SYMBOL(vio_find_node); | 
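|  |  |  | 
|  |  | /* | 
|  |  | * Example usage (sketch): code holding a device_node, e.g. from a | 
|  |  | * dynamic-reconfiguration notifier, can map it back to the registered | 
|  |  | * vio_dev.  bus_find_device_by_name() takes a reference, so drop it | 
|  |  | * when done: | 
|  |  | * | 
|  |  | *	struct vio_dev *vdev = vio_find_node(dn); | 
|  |  | * | 
|  |  | *	if (vdev) { | 
|  |  | *		... use vdev ... | 
|  |  | *		put_device(&vdev->dev); | 
|  |  | *	} | 
|  |  | */ | 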
|  | 1454 |  | 
|  | 1455 | int vio_enable_interrupts(struct vio_dev *dev) | 
|  | 1456 | { | 
|  | 1457 | int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE); | 
|  | 1458 | if (rc != H_SUCCESS) | 
|  | 1459 | printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc); | 
|  | 1460 | return rc; | 
|  | 1461 | } | 
|  | 1462 | EXPORT_SYMBOL(vio_enable_interrupts); | 
|  | 1463 |  | 
|  | 1464 | int vio_disable_interrupts(struct vio_dev *dev) | 
|  | 1465 | { | 
|  | 1466 | int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE); | 
|  | 1467 | if (rc != H_SUCCESS) | 
|  | 1468 | printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc); | 
|  | 1469 | return rc; | 
|  | 1470 | } | 
|  | 1471 | EXPORT_SYMBOL(vio_disable_interrupts); | 
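|  |  |  | 
|  |  | /* | 
|  |  | * Example usage (a sketch with hypothetical names): drivers typically | 
|  |  | * disable VIO interrupts at the top of their interrupt handler and | 
|  |  | * re-enable them once the event queue has been drained: | 
|  |  | * | 
|  |  | *	static irqreturn_t example_irq(int irq, void *data) | 
|  |  | *	{ | 
|  |  | *		struct vio_dev *vdev = data; | 
|  |  | * | 
|  |  | *		vio_disable_interrupts(vdev); | 
|  |  | *		... process pending queue entries ... | 
|  |  | *		vio_enable_interrupts(vdev); | 
|  |  | *		return IRQ_HANDLED; | 
|  |  | *	} | 
|  |  | */ | 
|  |  |  | 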
|  | 1472 | #endif /* CONFIG_PPC_PSERIES */ |