/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include "iova.h"
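
/**
 * init_iova_domain - initializes an iova domain
 * @iovad: iova domain in question
 * @pfn_32bit: pfn of the 32-bit DMA boundary for this domain
 * Initializes the locks, the empty rbtree and the cached rbnode
 * used to speed up allocations against the 32-bit boundary.
 */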
void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_alloc_lock);
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}
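
/* Pick the rbnode from which a backwards search should start.  When the
 * request is against the 32-bit boundary and a cached node exists, the
 * search starts just below the cached allocation and *limit_pfn is
 * lowered accordingly; otherwise it starts from the right-most node.
 */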
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}
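
/* Remember the most recent allocation made against the 32-bit boundary
 * so that the next 32-bit request can start its search from it.
 */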
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}
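
/* If the iova being freed lies at or above the cached allocation, move
 * the cache forward to the next node so it never points at a freed iova.
 */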
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}

/* Computes the padding size required, to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}
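
/* Walk the rbtree backwards from limit_pfn looking for a gap large enough
 * for @size pfns (plus alignment padding when @size_aligned is set), then
 * link the new iova into the tree.  Returns 0 on success and -ENOMEM when
 * no suitable gap exists.
 */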
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new iova into the domain rbtree while holding the lock */
	{
		struct rb_node **entry = &((prev)), *parent = NULL;

		/* Figure out where to put the new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add new node and rebalance tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}
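
/* Insert the iova into the rbtree rooted at @root, keyed by pfn_lo */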
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: size of page frames to allocate
 * @limit_pfn: max limit address
 * @size_aligned: set if a size-aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching down from limit_pfn rather than up from IOVA_START_PFN. If the
 * size_aligned flag is set then the allocated address iova->pfn_lo will be
 * naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	unsigned long flags;
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size_aligned is set then round the size up
	 * to the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
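
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * such as an IOMMU driver might pair alloc_iova() with __free_iova().
 * "domain" and "nrpages" below are hypothetical; the limit passed here is
 * the domain's 32-bit boundary so the cached-node fast path is used:
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&domain, nrpages, domain.dma_32bit_pfn, true);
 *	if (!iova)
 *		return NULL;
 *	... map the range iova->pfn_lo .. iova->pfn_hi ...
 *	__free_iova(&domain, iova);
 */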

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can
			 * possibly reference a particular iova and hence no
			 * conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that was allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);
	if (iova)
		__free_iova(iovad, iova);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);
		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
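
/* Returns 1 if the range [pfn_lo, pfn_hi] overlaps the iova at @node */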
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}
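
/* Allocates a new iova covering [pfn_lo, pfn_hi] and inserts it into the
 * domain's rbtree; returns NULL if the allocation fails.
 */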
static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}
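
/* Grow the overlapping iova downwards to cover *pfn_lo if needed, and if
 * the requested range extends past it, advance *pfn_lo beyond the iova so
 * the caller can reserve the remaining tail.
 */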
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	spin_lock(&iovad->iova_rbtree_lock);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved range
	 * or because we need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock(&iovad->iova_rbtree_lock);
	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
	return iova;
}
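
/*
 * Example (illustrative sketch, not part of the original file): callers
 * typically reserve ranges that alloc_iova() must never hand out, such as
 * an I/O hole.  "domain", "hole_start_pfn" and "hole_end_pfn" below are
 * hypothetical:
 *
 *	if (!reserve_iova(&domain, hole_start_pfn, hole_end_pfn))
 *		printk(KERN_ERR "Failed to reserve IOVA hole\n");
 */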

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: source domain from which to copy
 * @to: destination domain to which to copy
 * This function copies the reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_alloc_lock, flags);
	spin_lock(&from->iova_rbtree_lock);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;
		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range [%lx-%lx] failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock(&from->iova_rbtree_lock);
	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}
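
/*
 * Example (illustrative sketch, not part of the original file): when a new
 * domain is created, copy_reserved_iova() can seed it with the reserved
 * ranges of an existing domain.  "reserved_ranges_template", "new_domain"
 * and "dma_32bit_pfn" are hypothetical:
 *
 *	init_iova_domain(&new_domain, dma_32bit_pfn);
 *	copy_reserved_iova(&reserved_ranges_template, &new_domain);
 */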