/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"

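/*
 * Allocate a single object from the bitmap.  The search starts at
 * bitmap->last so that object numbers are handed out round-robin; when
 * the scan wraps past the end of the table, bitmap->top is advanced by
 * bitmap->max and wrapped by bitmap->mask, so the same table index
 * maps to a fresh object number on each pass.  Returns the object
 * number, or -1 if the bitmap is full.
 */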
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1) & (bitmap->max - 1);
		obj |= bitmap->top;
	} else
		obj = -1;

	spin_unlock(&bitmap->lock);

	return obj;
}

void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
	mlx4_bitmap_free_range(bitmap, obj, 1);
}

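/*
 * Scan @bitmap for a run of @len clear bits that starts at a multiple
 * of @align, beginning the search at @start.  Returns the index of the
 * first bit of the run, or -1 if no such run exists below @nbits.
 */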
static unsigned long find_aligned_range(unsigned long *bitmap,
					u32 start, u32 nbits,
					int len, int align)
{
	unsigned long end, i;

again:
	start = ALIGN(start, align);

	while ((start < nbits) && test_bit(start, bitmap))
		start += align;

	if (start >= nbits)
		return -1;

	end = start + len;
	if (end > nbits)
		return -1;

	for (i = start + 1; i < end; i++) {
		if (test_bit(i, bitmap)) {
			start = i + 1;
			goto again;
		}
	}

	return start;
}

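/*
 * Allocate @cnt contiguous objects aligned to a multiple of @align.
 * As in mlx4_bitmap_alloc(), the search is round-robin starting from
 * bitmap->last, retrying once from the beginning of the table (and
 * rotating bitmap->top) if no run is found.  Returns the first object
 * number of the range, or -1 on failure.
 */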
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
	u32 obj, i;

	if (likely(cnt == 1 && align == 1))
		return mlx4_bitmap_alloc(bitmap);

	spin_lock(&bitmap->lock);

	obj = find_aligned_range(bitmap->table, bitmap->last,
				 bitmap->max, cnt, align);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
		obj = find_aligned_range(bitmap->table, 0,
					 bitmap->max, cnt, align);
	}

	if (obj < bitmap->max) {
		for (i = 0; i < cnt; i++)
			set_bit(obj + i, bitmap->table);
		if (obj == bitmap->last) {
			bitmap->last = obj + cnt;
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		obj |= bitmap->top;
	} else
		obj = -1;

	spin_unlock(&bitmap->lock);

	return obj;
}

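/*
 * Return a range of @cnt objects starting at @obj to the bitmap.  The
 * top bits mixed in by the allocator are masked off before the table
 * bits are cleared, and bitmap->last is pulled back so freed objects
 * are found again quickly.
 */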
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
	u32 i;

	obj &= bitmap->max - 1;

	spin_lock(&bitmap->lock);
	for (i = 0; i < cnt; i++)
		clear_bit(obj + i, bitmap->table);
	bitmap->last = min(bitmap->last, obj);
	bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
	spin_unlock(&bitmap->lock);
}

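/*
 * Initialize a bitmap that can hand out object numbers 0..num-1, with
 * the first @reserved numbers marked permanently in use.  @num must be
 * a power of two; @mask bounds the rotating high bits mixed into the
 * object numbers handed back to callers.
 *
 * A minimal usage sketch (the resource and the sizes below are
 * hypothetical, not taken from this file):
 *
 *	struct mlx4_bitmap qp_bitmap;
 *
 *	if (!mlx4_bitmap_init(&qp_bitmap, 1 << 16, (1 << 24) - 1, 8)) {
 *		u32 qpn = mlx4_bitmap_alloc(&qp_bitmap);
 *		if (qpn != (u32) -1)
 *			mlx4_bitmap_free(&qp_bitmap, qpn);
 *		mlx4_bitmap_cleanup(&qp_bitmap);
 *	}
 */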
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
{
	int i;

	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top  = 0;
	bitmap->max  = num;
	bitmap->mask = mask;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof(long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	for (i = 0; i < reserved; ++i)
		set_bit(i, bitmap->table);

	return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

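/*
 * A sketch of the calling pattern (the sizes below are hypothetical):
 * on a system with 4KB pages, asking for 16KB with max_direct of one
 * page yields four separately DMA-mapped pages in buf.page_list[],
 * additionally vmap()ed into buf.direct.buf on 64-bit kernels, while
 * a request of one page or less comes back as a single contiguous
 * direct buffer:
 *
 *	struct mlx4_buf buf;
 *
 *	if (!mlx4_buf_alloc(dev, 16384, PAGE_SIZE, &buf)) {
 *		... fill the queue via buf.page_list[i].buf ...
 *		mlx4_buf_free(dev, 16384, &buf);
 *	}
 */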
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs        = 1;
		buf->npages       = 1;
		buf->page_shift   = get_order(size) + PAGE_SHIFT;
		buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
						       size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		/*
		 * Shrink the page size we report (doubling the page
		 * count) until the DMA address is aligned to it.
		 */
		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages      = buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->page_list   = kzalloc(buf->nbufs * sizeof *buf->page_list,
					   GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  buf->direct.map);
	else {
		if (BITS_PER_LONG == 64)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);

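/*
 * Doorbell records are carved out of DMA-coherent pages in chunks of
 * one (order 0) or two (order 1) records.  A fresh pgdir starts with
 * every slot free at order 1: bits[1] is filled and bits[0] is empty,
 * since a set bit means "free" in this buddy scheme.
 */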
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
	struct mlx4_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, GFP_KERNEL);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

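/*
 * Take one doorbell of the requested order from @pgdir, splitting an
 * order-1 slot when no order-0 slot is free: the slot is claimed at
 * the order it was found, and if that is higher than requested, the
 * buddy half (index i ^ 1) is marked free at the lower order.
 */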
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o;
	int i;

	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	i <<= o;

	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index   = i;
	db->db      = pgdir->db_page + db->index;
	db->dma     = pgdir->db_dma  + db->index * 4;
	db->order   = order;

	return 0;
}

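/*
 * Allocate a doorbell record, first trying every pgdir already on the
 * device's list and falling back to allocating a fresh page.  The
 * pgdir list is protected by priv->pgdir_mutex.
 */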
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&priv->pgdir_mutex);

	list_for_each_entry(pgdir, &priv->pgdir_list, list)
		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_alloc_db_pgdir(&dev->pdev->dev);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &priv->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&priv->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);

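/*
 * Free a doorbell record, coalescing with its buddy where possible:
 * if an order-0 record's neighbor is also free, the pair is re-merged
 * into an order-1 slot.  Once every slot in the page is free again
 * (bits[1] is full), the whole page is unmapped and removed from the
 * pgdir list.
 */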
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}

	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);

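/*
 * Convenience helper that sets up everything a hardware work queue
 * needs in one call -- a zeroed doorbell record, the queue buffer
 * itself, and an MTT that maps the buffer's pages -- unwinding in
 * reverse order on any failure.  mlx4_free_hwq_res() below releases
 * the same three resources.
 */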
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1);
	if (err)
		return err;

	*wqres->db.db = 0;

	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
	if (err)
		goto err_db;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
	mlx4_buf_free(dev, size, &wqres->buf);
err_db:
	mlx4_db_free(dev, &wqres->db);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);

void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);