/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while (0)
#define virtio_rmb(vq) \
	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while (0)
#define virtio_wmb(vq) \
	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while (0)
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration.  So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif

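/*
 * Illustrative sketch (not compiled) of the ordering these barriers
 * enforce.  The producer fills in descriptors and the available ring,
 * then publishes them with a write barrier before bumping avail->idx:
 *
 *	vq->vring.desc[i].addr = ...;		// fill descriptor
 *	vq->vring.avail->ring[avail] = head;	// queue the head
 *	virtio_wmb(vq);				// publish before...
 *	vq->vring.avail->idx++;			// ...the index update
 *
 * The consumer pairs this with virtio_rmb() before reading new entries,
 * as virtqueue_get_buf() below does for the used ring.
 */
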
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

	/* Index of the queue */
	int queue_index;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

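/*
 * A minimal sketch of what to_vvq() expands to: given the &vq->vq pointer
 * handed out to drivers, container_of() recovers the enclosing
 * struct vring_virtqueue by subtracting the offset of the "vq" member:
 *
 *	struct vring_virtqueue *vq =
 *		(struct vring_virtqueue *)((char *)_vq -
 *			offsetof(struct vring_virtqueue, vq));
 *
 * This works because every virtqueue created here is embedded inside a
 * vring_virtqueue (see vring_new_virtqueue() below).
 */
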
/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

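/*
 * Resulting layout, as a sketch: a single ring descriptor consumes one
 * free-list entry and points at the kmalloc'ed table holding the real
 * scatterlist, e.g. for out = 2, in = 1:
 *
 *	ring desc[head]: flags = INDIRECT, addr = virt_to_phys(desc),
 *			 len = 3 * sizeof(struct vring_desc)
 *	desc[0]: out, NEXT -> desc[1]
 *	desc[1]: out, NEXT -> desc[2]
 *	desc[2]: in (WRITE), no NEXT
 *
 * The table is freed again in detach_buf() once the host has used it.
 */
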
int virtqueue_get_queue_index(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	return vq->queue_index;
}
EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);

/**
 * virtqueue_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out: the number of sg readable by other side
 * @in: the number of sg which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns remaining capacity of queue or a negative error
 * (ie. ENOSPC).  Note that it only really makes sense to treat all
 * positive return values as "available": indirect buffers mean that
 * we can put an entire sg[] array inside a single queue entry.
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);

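/*
 * Typical driver usage, as an uncompiled sketch ("buf" and "len" are
 * hypothetical driver state, not names from this file): expose one
 * device-readable buffer and kick the host.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (virtqueue_add_buf(vq, &sg, 1, 0, buf, GFP_ATOMIC) < 0)
 *		return -ENOSPC;	// ring full: wait for virtqueue_get_buf()
 *	virtqueue_kick(vq);
 *
 * Passing "buf" as the token means virtqueue_get_buf() hands the same
 * pointer back once the host has consumed the buffer.
 */
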
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

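/*
 * Worked example of the event-index test above (vring_need_event() in
 * virtio_ring.h computes (u16)(new - event - 1) < (u16)(new - old)):
 * with old = 10, new = 13 and the host's avail event at 11, we get
 * 13 - 11 - 1 = 1 < 13 - 10 = 3, so a kick is needed.  Had the host set
 * the event index to 14, the unsigned wrap makes the left side huge and
 * the kick is suppressed; this is what lets the host batch kicks.
 */
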
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

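/*
 * An uncompiled sketch of why the split form exists ("lock" is a
 * hypothetical driver lock, not state from this file): only the prepare
 * half needs the virtqueue serialized, so the potentially slow notify
 * (an exit to the host) can happen outside the critical section:
 *
 *	spin_lock_irqsave(&lock, flags);
 *	virtqueue_add_buf(vq, sg, out, in, data, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */
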
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

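/*
 * Typical completion processing, as an uncompiled sketch: drain every
 * used buffer the host has posted, using the returned token to find the
 * driver's own bookkeeping ("reclaim" is a hypothetical driver helper):
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		reclaim(buf, len);
 */
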
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

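/*
 * The return value closes a race, shown here as an uncompiled sketch of
 * a driver's callback ("process" is a hypothetical driver helper): if
 * new buffers arrived between the final virtqueue_get_buf() and
 * re-enabling callbacks, enable_cb returns false and the driver must
 * loop rather than go back to sleep:
 *
 *	again:
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(buf, len);
 *		if (!virtqueue_enable_cb(vq))
 *			goto again;
 */
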
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

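/*
 * Worked example of the 3/4 threshold above: with 16 buffers still
 * outstanding (avail->idx - last_used_idx), bufs is 12, so the used
 * event index is set 12 entries ahead and the host is asked to
 * interrupt only after consuming most of the backlog, rather than once
 * per buffer.  The final check catches the host racing past that point
 * before the new event index became visible.
 */
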
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

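/*
 * Shutdown-time usage, as an uncompiled sketch: after the device has
 * been reset (so the host can no longer use the ring), reclaim the
 * tokens that were never returned through virtqueue_get_buf():
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_my_buffer(buf);	// hypothetical driver helper
 */
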
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

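/*
 * An uncompiled sketch of how a transport might wire this up, with
 * "irq" coming from the hypothetical transport device and the
 * virtqueue pointer used as the dev_id cookie:
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  dev_name(&vdev->dev), vq);
 *
 * The IRQ_NONE return above is what makes sharing the line safe.
 */
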
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	vq->queue_index = index;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

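/*
 * An uncompiled sketch of how a transport might use this ("info" and
 * vp_notify() are hypothetical transport pieces, loosely modelled on
 * virtio_pci): allocate page-aligned, zeroed ring memory of
 * vring_size() bytes, then wrap it in a virtqueue:
 *
 *	size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
 *	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
 *	vq = vring_new_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, vdev,
 *				 true, info->queue, vp_notify, callback, name);
 *	if (!vq)
 *		free_pages_exact(info->queue, size);
 *
 * The ring must be zeroed: the free-list setup above initializes only
 * the desc[].next chain, and both sides assume the idx fields start at 0.
 */
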
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

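/*
 * An uncompiled sketch of where this hook sits, loosely modelled on
 * virtio_pci: a transport's finalize_features path calls it so the
 * ring features implemented by this file survive negotiation while
 * unknown transport bits are dropped:
 *
 *	static void vp_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);	// keep only known bits
 *		...				// then write features out
 *	}
 */
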
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");