| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 1 | /* Virtio ring implementation. | 
 | 2 |  * | 
 | 3 |  *  Copyright 2007 Rusty Russell IBM Corporation | 
 | 4 |  * | 
 | 5 |  *  This program is free software; you can redistribute it and/or modify | 
 | 6 |  *  it under the terms of the GNU General Public License as published by | 
 | 7 |  *  the Free Software Foundation; either version 2 of the License, or | 
 | 8 |  *  (at your option) any later version. | 
 | 9 |  * | 
 | 10 |  *  This program is distributed in the hope that it will be useful, | 
 | 11 |  *  but WITHOUT ANY WARRANTY; without even the implied warranty of | 
 | 12 |  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
 | 13 |  *  GNU General Public License for more details. | 
 | 14 |  * | 
 | 15 |  *  You should have received a copy of the GNU General Public License | 
 | 16 |  *  along with this program; if not, write to the Free Software | 
 | 17 |  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA | 
 | 18 |  */ | 
 | 19 | #include <linux/virtio.h> | 
 | 20 | #include <linux/virtio_ring.h> | 
| Rusty Russell | e34f872 | 2008-07-25 12:06:13 -0500 | [diff] [blame] | 21 | #include <linux/virtio_config.h> | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 22 | #include <linux/device.h> | 
| Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 23 | #include <linux/slab.h> | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 24 |  | 
/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
/* Three flavors: full (mb), read (rmb) and write (wmb); callers use the
 * weakest one that gives the ordering they need. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio does not use). */
#define virtio_mb() smp_mb()
#define virtio_rmb() smp_rmb()
#define virtio_wmb() smp_wmb()
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration. So fall back to mandatory barriers instead. */
#define virtio_mb() mb()
#define virtio_rmb() rmb()
#define virtio_wmb() wmb()
#endif
 | 42 |  | 
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry.  Record __LINE__ so a
 * reentry panic identifies where the queue was first entered. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
/* In production, a bad ring is logged and flagged broken; later calls
 * (e.g. virtqueue_get_buf) see ->broken and bail instead of crashing. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
 | 71 |  | 
/* Private per-queue state wrapping the public struct virtqueue. */
struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks.  One slot per ring entry, indexed by the
	 * head descriptor of each buffer chain; NULL means unused. */
	void *data[];
};

/* Recover the private vring_virtqueue from the embedded public virtqueue. */
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
 | 108 |  | 
| Mark McLoughlin | 9fa29b9d | 2009-05-11 18:11:45 +0100 | [diff] [blame] | 109 | /* Set up an indirect table of descriptors and add it to the queue. */ | 
 | 110 | static int vring_add_indirect(struct vring_virtqueue *vq, | 
 | 111 | 			      struct scatterlist sg[], | 
 | 112 | 			      unsigned int out, | 
| Michael S. Tsirkin | bbd603e | 2010-04-29 17:26:37 +0300 | [diff] [blame] | 113 | 			      unsigned int in, | 
 | 114 | 			      gfp_t gfp) | 
| Mark McLoughlin | 9fa29b9d | 2009-05-11 18:11:45 +0100 | [diff] [blame] | 115 | { | 
 | 116 | 	struct vring_desc *desc; | 
 | 117 | 	unsigned head; | 
 | 118 | 	int i; | 
 | 119 |  | 
| Michael S. Tsirkin | bbd603e | 2010-04-29 17:26:37 +0300 | [diff] [blame] | 120 | 	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp); | 
| Mark McLoughlin | 9fa29b9d | 2009-05-11 18:11:45 +0100 | [diff] [blame] | 121 | 	if (!desc) | 
| Michael S. Tsirkin | 686d363 | 2010-06-10 18:16:11 +0300 | [diff] [blame] | 122 | 		return -ENOMEM; | 
| Mark McLoughlin | 9fa29b9d | 2009-05-11 18:11:45 +0100 | [diff] [blame] | 123 |  | 
 | 124 | 	/* Transfer entries from the sg list into the indirect page */ | 
 | 125 | 	for (i = 0; i < out; i++) { | 
 | 126 | 		desc[i].flags = VRING_DESC_F_NEXT; | 
 | 127 | 		desc[i].addr = sg_phys(sg); | 
 | 128 | 		desc[i].len = sg->length; | 
 | 129 | 		desc[i].next = i+1; | 
 | 130 | 		sg++; | 
 | 131 | 	} | 
 | 132 | 	for (; i < (out + in); i++) { | 
 | 133 | 		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; | 
 | 134 | 		desc[i].addr = sg_phys(sg); | 
 | 135 | 		desc[i].len = sg->length; | 
 | 136 | 		desc[i].next = i+1; | 
 | 137 | 		sg++; | 
 | 138 | 	} | 
 | 139 |  | 
 | 140 | 	/* Last one doesn't continue. */ | 
 | 141 | 	desc[i-1].flags &= ~VRING_DESC_F_NEXT; | 
 | 142 | 	desc[i-1].next = 0; | 
 | 143 |  | 
 | 144 | 	/* We're about to use a buffer */ | 
 | 145 | 	vq->num_free--; | 
 | 146 |  | 
 | 147 | 	/* Use a single buffer which doesn't continue */ | 
 | 148 | 	head = vq->free_head; | 
 | 149 | 	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; | 
 | 150 | 	vq->vring.desc[head].addr = virt_to_phys(desc); | 
 | 151 | 	vq->vring.desc[head].len = i * sizeof(struct vring_desc); | 
 | 152 |  | 
 | 153 | 	/* Update free pointer */ | 
 | 154 | 	vq->free_head = vq->vring.desc[head].next; | 
 | 155 |  | 
 | 156 | 	return head; | 
 | 157 | } | 
 | 158 |  | 
/* Expose a buffer to the other end of the queue.  The first @out
 * scatterlist entries are readable by the device, the following @in
 * entries are writable by it.  @data is the opaque token later handed
 * back by virtqueue_get_buf().  Returns remaining capacity (see the
 * tail of the function), or -ENOSPC when the ring is full. */
int virtqueue_add_buf_gfp(struct virtqueue *_vq,
			  struct scatterlist sg[],
			  unsigned int out,
			  unsigned int in,
			  void *data,
			  gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}
	/* Indirect table allocation failed (or not supported): fall back
	 * to consuming out+in descriptors directly from the ring. */

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	/* Device-readable (out) entries first, chained via the free list. */
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Then device-writable (in) entries, marked F_WRITE. */
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync).  FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* If we're indirect, we can fit many (assuming not OOM). */
	if (vq->indirect)
		return vq->num_free ? vq->vring.num : 0;
	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 239 |  | 
/* Publish all buffers added since the last kick (by bumping avail->idx)
 * and, unless the host has set VRING_USED_F_NO_NOTIFY, notify it.
 * The two barriers below order the publish steps; do not reorder. */
void virtqueue_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb();

	vq->vring.avail->idx += vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	virtio_mb();

	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 261 |  | 
 | 262 | static void detach_buf(struct vring_virtqueue *vq, unsigned int head) | 
 | 263 | { | 
 | 264 | 	unsigned int i; | 
 | 265 |  | 
 | 266 | 	/* Clear data ptr. */ | 
 | 267 | 	vq->data[head] = NULL; | 
 | 268 |  | 
 | 269 | 	/* Put back on free list: find end */ | 
 | 270 | 	i = head; | 
| Mark McLoughlin | 9fa29b9d | 2009-05-11 18:11:45 +0100 | [diff] [blame] | 271 |  | 
 | 272 | 	/* Free the indirect table */ | 
 | 273 | 	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) | 
 | 274 | 		kfree(phys_to_virt(vq->vring.desc[i].addr)); | 
 | 275 |  | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 276 | 	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { | 
 | 277 | 		i = vq->vring.desc[i].next; | 
 | 278 | 		vq->num_free++; | 
 | 279 | 	} | 
 | 280 |  | 
 | 281 | 	vq->vring.desc[i].next = vq->free_head; | 
 | 282 | 	vq->free_head = head; | 
 | 283 | 	/* Plus final descriptor */ | 
 | 284 | 	vq->num_free++; | 
 | 285 | } | 
 | 286 |  | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 287 | static inline bool more_used(const struct vring_virtqueue *vq) | 
 | 288 | { | 
 | 289 | 	return vq->last_used_idx != vq->vring.used->idx; | 
 | 290 | } | 
 | 291 |  | 
| Michael S. Tsirkin | 7c5e9ed | 2010-04-12 16:19:07 +0300 | [diff] [blame] | 292 | void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 293 | { | 
 | 294 | 	struct vring_virtqueue *vq = to_vvq(_vq); | 
 | 295 | 	void *ret; | 
 | 296 | 	unsigned int i; | 
 | 297 |  | 
 | 298 | 	START_USE(vq); | 
 | 299 |  | 
| Rusty Russell | 5ef8275 | 2008-05-02 21:50:43 -0500 | [diff] [blame] | 300 | 	if (unlikely(vq->broken)) { | 
 | 301 | 		END_USE(vq); | 
 | 302 | 		return NULL; | 
 | 303 | 	} | 
 | 304 |  | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 305 | 	if (!more_used(vq)) { | 
 | 306 | 		pr_debug("No more buffers in queue\n"); | 
 | 307 | 		END_USE(vq); | 
 | 308 | 		return NULL; | 
 | 309 | 	} | 
 | 310 |  | 
| Michael S. Tsirkin | 2d61ba9 | 2009-10-25 15:28:53 +0200 | [diff] [blame] | 311 | 	/* Only get used array entries after they have been exposed by host. */ | 
| Michael S. Tsirkin | d57ed95 | 2010-01-28 00:42:23 +0200 | [diff] [blame] | 312 | 	virtio_rmb(); | 
| Michael S. Tsirkin | 2d61ba9 | 2009-10-25 15:28:53 +0200 | [diff] [blame] | 313 |  | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 314 | 	i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id; | 
 | 315 | 	*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len; | 
 | 316 |  | 
 | 317 | 	if (unlikely(i >= vq->vring.num)) { | 
 | 318 | 		BAD_RING(vq, "id %u out of range\n", i); | 
 | 319 | 		return NULL; | 
 | 320 | 	} | 
 | 321 | 	if (unlikely(!vq->data[i])) { | 
 | 322 | 		BAD_RING(vq, "id %u is not a head!\n", i); | 
 | 323 | 		return NULL; | 
 | 324 | 	} | 
 | 325 |  | 
 | 326 | 	/* detach_buf clears data, so grab it now. */ | 
 | 327 | 	ret = vq->data[i]; | 
 | 328 | 	detach_buf(vq, i); | 
 | 329 | 	vq->last_used_idx++; | 
 | 330 | 	END_USE(vq); | 
 | 331 | 	return ret; | 
 | 332 | } | 
| Michael S. Tsirkin | 7c5e9ed | 2010-04-12 16:19:07 +0300 | [diff] [blame] | 333 | EXPORT_SYMBOL_GPL(virtqueue_get_buf); | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 334 |  | 
| Michael S. Tsirkin | 7c5e9ed | 2010-04-12 16:19:07 +0300 | [diff] [blame] | 335 | void virtqueue_disable_cb(struct virtqueue *_vq) | 
| Rusty Russell | 18445c4 | 2008-02-04 23:49:57 -0500 | [diff] [blame] | 336 | { | 
 | 337 | 	struct vring_virtqueue *vq = to_vvq(_vq); | 
 | 338 |  | 
| Rusty Russell | 18445c4 | 2008-02-04 23:49:57 -0500 | [diff] [blame] | 339 | 	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; | 
| Rusty Russell | 18445c4 | 2008-02-04 23:49:57 -0500 | [diff] [blame] | 340 | } | 
| Michael S. Tsirkin | 7c5e9ed | 2010-04-12 16:19:07 +0300 | [diff] [blame] | 341 | EXPORT_SYMBOL_GPL(virtqueue_disable_cb); | 
| Rusty Russell | 18445c4 | 2008-02-04 23:49:57 -0500 | [diff] [blame] | 342 |  | 
| Michael S. Tsirkin | 7c5e9ed | 2010-04-12 16:19:07 +0300 | [diff] [blame] | 343 | bool virtqueue_enable_cb(struct virtqueue *_vq) | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 344 | { | 
 | 345 | 	struct vring_virtqueue *vq = to_vvq(_vq); | 
 | 346 |  | 
 | 347 | 	START_USE(vq); | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 348 |  | 
 | 349 | 	/* We optimistically turn back on interrupts, then check if there was | 
 | 350 | 	 * more to do. */ | 
 | 351 | 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; | 
| Michael S. Tsirkin | d57ed95 | 2010-01-28 00:42:23 +0200 | [diff] [blame] | 352 | 	virtio_mb(); | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 353 | 	if (unlikely(more_used(vq))) { | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 354 | 		END_USE(vq); | 
 | 355 | 		return false; | 
 | 356 | 	} | 
 | 357 |  | 
 | 358 | 	END_USE(vq); | 
 | 359 | 	return true; | 
 | 360 | } | 
| Michael S. Tsirkin | 7c5e9ed | 2010-04-12 16:19:07 +0300 | [diff] [blame] | 361 | EXPORT_SYMBOL_GPL(virtqueue_enable_cb); | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 362 |  | 
| Michael S. Tsirkin | 7c5e9ed | 2010-04-12 16:19:07 +0300 | [diff] [blame] | 363 | void *virtqueue_detach_unused_buf(struct virtqueue *_vq) | 
| Shirley Ma | c021eac | 2010-01-18 19:15:23 +0530 | [diff] [blame] | 364 | { | 
 | 365 | 	struct vring_virtqueue *vq = to_vvq(_vq); | 
 | 366 | 	unsigned int i; | 
 | 367 | 	void *buf; | 
 | 368 |  | 
 | 369 | 	START_USE(vq); | 
 | 370 |  | 
 | 371 | 	for (i = 0; i < vq->vring.num; i++) { | 
 | 372 | 		if (!vq->data[i]) | 
 | 373 | 			continue; | 
 | 374 | 		/* detach_buf clears data, so grab it now. */ | 
 | 375 | 		buf = vq->data[i]; | 
 | 376 | 		detach_buf(vq, i); | 
 | 377 | 		END_USE(vq); | 
 | 378 | 		return buf; | 
 | 379 | 	} | 
 | 380 | 	/* That should have freed everything. */ | 
 | 381 | 	BUG_ON(vq->num_free != vq->vring.num); | 
 | 382 |  | 
 | 383 | 	END_USE(vq); | 
 | 384 | 	return NULL; | 
 | 385 | } | 
| Michael S. Tsirkin | 7c5e9ed | 2010-04-12 16:19:07 +0300 | [diff] [blame] | 386 | EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); | 
| Shirley Ma | c021eac | 2010-01-18 19:15:23 +0530 | [diff] [blame] | 387 |  | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 388 | irqreturn_t vring_interrupt(int irq, void *_vq) | 
 | 389 | { | 
 | 390 | 	struct vring_virtqueue *vq = to_vvq(_vq); | 
 | 391 |  | 
 | 392 | 	if (!more_used(vq)) { | 
 | 393 | 		pr_debug("virtqueue interrupt with no work for %p\n", vq); | 
 | 394 | 		return IRQ_NONE; | 
 | 395 | 	} | 
 | 396 |  | 
 | 397 | 	if (unlikely(vq->broken)) | 
 | 398 | 		return IRQ_HANDLED; | 
 | 399 |  | 
 | 400 | 	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); | 
| Rusty Russell | 18445c4 | 2008-02-04 23:49:57 -0500 | [diff] [blame] | 401 | 	if (vq->vq.callback) | 
 | 402 | 		vq->vq.callback(&vq->vq); | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 403 |  | 
 | 404 | 	return IRQ_HANDLED; | 
 | 405 | } | 
| Rusty Russell | c6fd470 | 2008-02-04 23:50:05 -0500 | [diff] [blame] | 406 | EXPORT_SYMBOL_GPL(vring_interrupt); | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 407 |  | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 408 | struct virtqueue *vring_new_virtqueue(unsigned int num, | 
| Rusty Russell | 87c7d57 | 2008-12-30 09:26:03 -0600 | [diff] [blame] | 409 | 				      unsigned int vring_align, | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 410 | 				      struct virtio_device *vdev, | 
 | 411 | 				      void *pages, | 
 | 412 | 				      void (*notify)(struct virtqueue *), | 
| Rusty Russell | 9499f5e | 2009-06-12 22:16:35 -0600 | [diff] [blame] | 413 | 				      void (*callback)(struct virtqueue *), | 
 | 414 | 				      const char *name) | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 415 | { | 
 | 416 | 	struct vring_virtqueue *vq; | 
 | 417 | 	unsigned int i; | 
 | 418 |  | 
| Rusty Russell | 42b36cc | 2007-11-12 13:39:18 +1100 | [diff] [blame] | 419 | 	/* We assume num is a power of 2. */ | 
 | 420 | 	if (num & (num - 1)) { | 
 | 421 | 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); | 
 | 422 | 		return NULL; | 
 | 423 | 	} | 
 | 424 |  | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 425 | 	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL); | 
 | 426 | 	if (!vq) | 
 | 427 | 		return NULL; | 
 | 428 |  | 
| Rusty Russell | 87c7d57 | 2008-12-30 09:26:03 -0600 | [diff] [blame] | 429 | 	vring_init(&vq->vring, num, pages, vring_align); | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 430 | 	vq->vq.callback = callback; | 
 | 431 | 	vq->vq.vdev = vdev; | 
| Rusty Russell | 9499f5e | 2009-06-12 22:16:35 -0600 | [diff] [blame] | 432 | 	vq->vq.name = name; | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 433 | 	vq->notify = notify; | 
 | 434 | 	vq->broken = false; | 
 | 435 | 	vq->last_used_idx = 0; | 
 | 436 | 	vq->num_added = 0; | 
| Rusty Russell | 9499f5e | 2009-06-12 22:16:35 -0600 | [diff] [blame] | 437 | 	list_add_tail(&vq->vq.list, &vdev->vqs); | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 438 | #ifdef DEBUG | 
 | 439 | 	vq->in_use = false; | 
 | 440 | #endif | 
 | 441 |  | 
| Mark McLoughlin | 9fa29b9d | 2009-05-11 18:11:45 +0100 | [diff] [blame] | 442 | 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); | 
 | 443 |  | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 444 | 	/* No callback?  Tell other side not to bother us. */ | 
 | 445 | 	if (!callback) | 
 | 446 | 		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; | 
 | 447 |  | 
 | 448 | 	/* Put everything in free lists. */ | 
 | 449 | 	vq->num_free = num; | 
 | 450 | 	vq->free_head = 0; | 
| Amit Shah | 3b87062 | 2010-02-12 10:32:14 +0530 | [diff] [blame] | 451 | 	for (i = 0; i < num-1; i++) { | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 452 | 		vq->vring.desc[i].next = i+1; | 
| Amit Shah | 3b87062 | 2010-02-12 10:32:14 +0530 | [diff] [blame] | 453 | 		vq->data[i] = NULL; | 
 | 454 | 	} | 
 | 455 | 	vq->data[i] = NULL; | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 456 |  | 
 | 457 | 	return &vq->vq; | 
 | 458 | } | 
| Rusty Russell | c6fd470 | 2008-02-04 23:50:05 -0500 | [diff] [blame] | 459 | EXPORT_SYMBOL_GPL(vring_new_virtqueue); | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 460 |  | 
 | 461 | void vring_del_virtqueue(struct virtqueue *vq) | 
 | 462 | { | 
| Rusty Russell | 9499f5e | 2009-06-12 22:16:35 -0600 | [diff] [blame] | 463 | 	list_del(&vq->list); | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 464 | 	kfree(to_vvq(vq)); | 
 | 465 | } | 
| Rusty Russell | c6fd470 | 2008-02-04 23:50:05 -0500 | [diff] [blame] | 466 | EXPORT_SYMBOL_GPL(vring_del_virtqueue); | 
| Rusty Russell | 0a8a69d | 2007-10-22 11:03:40 +1000 | [diff] [blame] | 467 |  | 
| Rusty Russell | e34f872 | 2008-07-25 12:06:13 -0500 | [diff] [blame] | 468 | /* Manipulates transport-specific feature bits. */ | 
 | 469 | void vring_transport_features(struct virtio_device *vdev) | 
 | 470 | { | 
 | 471 | 	unsigned int i; | 
 | 472 |  | 
 | 473 | 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { | 
 | 474 | 		switch (i) { | 
| Mark McLoughlin | 9fa29b9d | 2009-05-11 18:11:45 +0100 | [diff] [blame] | 475 | 		case VIRTIO_RING_F_INDIRECT_DESC: | 
 | 476 | 			break; | 
| Rusty Russell | e34f872 | 2008-07-25 12:06:13 -0500 | [diff] [blame] | 477 | 		default: | 
 | 478 | 			/* We don't understand this bit. */ | 
 | 479 | 			clear_bit(i, vdev->features); | 
 | 480 | 		} | 
 | 481 | 	} | 
 | 482 | } | 
 | 483 | EXPORT_SYMBOL_GPL(vring_transport_features); | 
 | 484 |  | 
/* Module license tag; the full notice is at the top of this file. */
MODULE_LICENSE("GPL");