/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while (0)
#define virtio_rmb(vq) \
	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while (0)
#define virtio_wmb(vq) \
	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while (0)
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration.  So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side.  FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

/**
 * virtqueue_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out: the number of sg entries readable by the other side
 * @in: the number of sg entries which are writable (after the readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns remaining capacity of queue or a negative error
 * (i.e. ENOSPC).  Note that it only really makes sense to treat all
 * positive return values as "available": indirect buffers mean that
 * we can put an entire sg[] array inside a single queue entry.
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect.  FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);

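/*
 * Usage sketch (illustrative only, not called from this file): a driver
 * typically wraps its payload in a scatterlist, adds it with a token, and
 * kicks.  The "buf"/"len" arguments are hypothetical driver data, and the
 * sketch assumes <linux/scatterlist.h> for sg_init_one().
 */
static inline int example_add_and_kick(struct virtqueue *vq,
				       void *buf, unsigned int len)
{
	struct scatterlist sg;
	int ret;

	sg_init_one(&sg, buf, len);
	/* One readable (out) entry, no writable (in) entries. */
	ret = virtqueue_add_buf(vq, &sg, 1, 0, buf, GFP_ATOMIC);
	if (ret < 0)
		return ret;	/* e.g. -ENOSPC when the ring is full */
	virtqueue_kick(vq);
	return 0;
}
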
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

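/*
 * Usage sketch (illustrative only): the split kick lets a hypothetical
 * driver hold its own lock only for the cheap, serialized half, keeping
 * the potentially expensive host notification outside the critical
 * section.  The lock is the driver's, not part of this file.
 */
static inline void example_add_and_kick_locked(struct virtqueue *vq,
					       spinlock_t *lock)
{
	bool notify;

	spin_lock(lock);
	/* Serialized half: decides whether the other side needs a kick. */
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(lock);

	/* Unserialized half: may be a slow exit to the host. */
	if (notify)
		virtqueue_notify(vq);
}
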
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

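/*
 * Usage sketch (illustrative only): drain everything the other side has
 * used, handing each token back to a hypothetical per-driver handler.
 */
static inline void example_drain_used(struct virtqueue *vq,
				      void (*handle)(void *data,
						     unsigned int len))
{
	unsigned int len;
	void *data;

	while ((data = virtqueue_get_buf(vq, &len)) != NULL)
		handle(data, len);	/* len = bytes the device wrote */
}
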
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value.  This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq);
	return (u16)last_used_idx != vq->vring.used->idx;
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

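/*
 * Usage sketch (illustrative only): check for used buffers from a
 * hypothetical busy-wait loop, without committing to an interrupt.
 */
static inline bool example_busy_poll(struct virtqueue *vq)
{
	unsigned opaque = virtqueue_enable_cb_prepare(vq);

	if (virtqueue_poll(vq, opaque)) {
		/* Work appeared; suppress callbacks again and process it. */
		virtqueue_disable_cb(vq);
		return true;
	}
	return false;
}
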
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

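/*
 * Usage sketch (illustrative only): the canonical race-free completion
 * loop.  If a buffer is used between the last get_buf and enable_cb,
 * enable_cb returns false and we go around again instead of sleeping
 * through the notification.  The handler is hypothetical driver code.
 */
static inline void example_completion_loop(struct virtqueue *vq,
					   void (*handle)(void *data,
							  unsigned int len))
{
	unsigned int len;
	void *data;

	do {
		virtqueue_disable_cb(vq);
		while ((data = virtqueue_get_buf(vq, &len)) != NULL)
			handle(data, len);
	} while (!virtqueue_enable_cb(vq));
}
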
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

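/*
 * Usage sketch (illustrative only): on a high-rate queue (e.g. network
 * TX reclaim), a hypothetical driver frees what it can, then asks to be
 * interrupted only once most remaining buffers have been used.
 */
static inline bool example_reclaim_delayed(struct virtqueue *vq,
					   void (*free_token)(void *data))
{
	unsigned int len;
	void *data;

	while ((data = virtqueue_get_buf(vq, &len)) != NULL)
		free_token(data);
	/* false means many buffers are pending again: poll once more. */
	return virtqueue_enable_cb_delayed(vq);
}
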
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

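/*
 * Usage sketch (illustrative only): on device teardown, reclaim the
 * tokens for buffers the other side never consumed.
 */
static inline void example_shutdown_drain(struct virtqueue *vq,
					  void (*free_token)(void *data))
{
	void *data;

	while ((data = virtqueue_detach_unused_buf(vq)) != NULL)
		free_token(data);
}
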
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

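/*
 * Usage sketch (illustrative only): how a transport might create a queue.
 * The queue size, alignment and page allocation are hypothetical; a real
 * transport (e.g. virtio_pci) negotiates them with the host and supplies
 * its own notify hook.
 */
static inline struct virtqueue *example_setup_vq(struct virtio_device *vdev,
					void (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *))
{
	/* Enough zeroed, physically contiguous lowmem for a 64-entry vring. */
	void *pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);

	if (!pages)
		return NULL;
	return vring_new_virtqueue(64, 4096, vdev, true, pages,
				   notify, callback, "example-vq");
}
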
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");