#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H
/* An interface for efficient virtio implementation, currently for use by KVM
 * and lguest, but hopefully others soon.  Do NOT change this since it will
 * break existing servers and clients.
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright Rusty Russell IBM Corporation 2007. */
#include <linux/types.h>

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT	1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE	2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT	4

/* The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer.  It's unreliable, so it's simply an optimization.  Guest
 * will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY	1
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer.  It's unreliable, so it's simply an
 * optimization.  */
#define VRING_AVAIL_F_NO_INTERRUPT	1
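
/*
 * Example (illustrative sketch, not part of the ABI above): after a Guest has
 * added buffers and bumped avail->idx, it only needs to kick the Host when
 * the Host has not asked for notifications to be suppressed.  "vr" and
 * "notify_host" are hypothetical; the notification mechanism itself is
 * transport-specific.
 *
 *	if (!(vr->used->flags & VRING_USED_F_NO_NOTIFY))
 *		notify_host(vr);
 */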

/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC	28

/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX		29

/* Virtio ring descriptors: 16 bytes.  These can chain together via "next". */
struct vring_desc {
	/* Address (guest-physical). */
	__u64 addr;
	/* Length. */
	__u32 len;
	/* The flags as indicated above. */
	__u16 flags;
	/* We chain unused descriptors via this, too */
	__u16 next;
};
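
/*
 * Example (illustrative sketch): a two-descriptor chain in which the first
 * buffer is device-readable and the second is device-writable.  The buffer
 * addresses, lengths and the choice of descriptor slots are hypothetical; a
 * real driver takes them from its own free-descriptor bookkeeping.
 *
 *	struct vring_desc *desc = vr->desc;
 *
 *	desc[0].addr  = out_buf_phys;		// Host reads this buffer
 *	desc[0].len   = out_len;
 *	desc[0].flags = VRING_DESC_F_NEXT;
 *	desc[0].next  = 1;			// chain continues in descriptor 1
 *
 *	desc[1].addr  = in_buf_phys;		// Host writes this buffer
 *	desc[1].len   = in_len;
 *	desc[1].flags = VRING_DESC_F_WRITE;	// no F_NEXT: end of chain
 */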

struct vring_avail {
	__u16 flags;
	__u16 idx;
	__u16 ring[];
};

/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
	/* Index of start of used descriptor chain. */
	__u32 id;
	/* Total length of the descriptor chain which was used (written to) */
	__u32 len;
};

struct vring_used {
	__u16 flags;
	__u16 idx;
	struct vring_used_elem ring[];
};
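
/*
 * Example (illustrative sketch): a Guest draining the used ring.  used->idx
 * is a free-running counter written by the Host, so it is reduced modulo the
 * ring size when indexing.  "last_used" is hypothetical per-queue state kept
 * by the driver, and "recycle_chain" stands in for returning the descriptor
 * chain starting at "id" to the free list.  Real code also needs memory
 * barriers between reading used->idx and reading the ring entries.
 *
 *	while (last_used != vr->used->idx) {
 *		struct vring_used_elem *e;
 *
 *		e = &vr->used->ring[last_used % vr->num];
 *		recycle_chain(e->id, e->len);
 *		last_used++;
 *	}
 */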

struct vring {
	unsigned int num;

	struct vring_desc *desc;

	struct vring_avail *avail;

	struct vring_used *used;
};

/* The standard layout for the ring is a contiguous chunk of memory which looks
 * like this.  We assume num is a power of 2.
 *
 * struct vring
 * {
 *	// The actual descriptors (16 bytes each)
 *	struct vring_desc desc[num];
 *
 *	// A ring of available descriptor heads with free-running index.
 *	__u16 avail_flags;
 *	__u16 avail_idx;
 *	__u16 available[num];
 *	__u16 used_event_idx;
 *
 *	// Padding to the next align boundary.
 *	char pad[];
 *
 *	// A ring of used descriptor heads with free-running index.
 *	__u16 used_flags;
 *	__u16 used_idx;
 *	struct vring_used_elem used[num];
 *	__u16 avail_event_idx;
 * };
 */
/* We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility. */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num])
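
/*
 * Example (illustrative sketch): when VIRTIO_RING_F_EVENT_IDX has been
 * negotiated, a Guest that has consumed used entries up to the hypothetical
 * index "last_used" can ask to be interrupted only once the Host moves past
 * that index, instead of toggling VRING_AVAIL_F_NO_INTERRUPT:
 *
 *	vring_used_event(vr) = last_used;
 *
 * Real code follows this with a memory barrier and re-checks used->idx to
 * close the race with a Host that advanced the ring concurrently.
 */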

static inline void vring_init(struct vring *vr, unsigned int num, void *p,
			      unsigned long align)
{
	vr->num = num;
	vr->desc = p;
	vr->avail = p + num*sizeof(struct vring_desc);
	vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__u16)
		+ align-1) & ~(align - 1));
}

static inline unsigned vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
		 + align - 1) & ~(align - 1))
		+ sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
}
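
/*
 * Example (illustrative sketch): laying out a vring over a freshly allocated,
 * zeroed chunk of memory.  The 4096-byte alignment and the use of the
 * kernel's alloc_pages_exact() are illustrative assumptions; the alignment is
 * whatever the transport requires, and both sides must agree on "num" and on
 * where the ring lives.
 *
 *	struct vring vr;
 *	unsigned int num = 256;		// must be a power of 2
 *	void *p = alloc_pages_exact(vring_size(num, 4096),
 *				    GFP_KERNEL | __GFP_ZERO);
 *
 *	vring_init(&vr, num, p, 4096);
 *	// vr.desc, vr.avail and vr.used now point into the same chunk, with
 *	// the used ring starting on the next 4096-byte boundary.
 */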

/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	/* Note: Xen has similar logic for notification hold-off
	 * in include/xen/interface/io/ring.h with req_event and req_prod
	 * corresponding to event_idx + 1 and new_idx respectively.
	 * Note also that req_event and req_prod in Xen start at 1,
	 * event indexes in virtio start at 0. */
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
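
/*
 * Example (illustrative sketch): the Host side deciding whether to interrupt
 * the Guest after publishing used entries, with VIRTIO_RING_F_EVENT_IDX
 * negotiated.  "old_used_idx" is the value of used->idx before this batch was
 * added, and "send_interrupt" stands in for the transport's interrupt
 * injection.
 *
 *	if (vring_need_event(vring_used_event(vr), vr->used->idx, old_used_idx))
 *		send_interrupt(vr);
 *
 * The wrap-safe comparison inside vring_need_event() means an event fires
 * exactly when used->idx passes the index the Guest asked for, even when the
 * free-running 16-bit counters wrap around.
 */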

#ifdef __KERNEL__
#include <linux/irqreturn.h>
struct virtio_device;
struct virtqueue;

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
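
/*
 * Example (illustrative sketch): how a virtio transport driver might create a
 * virtqueue on top of this ring.  "VRING_ALIGN", "queue_pages", "vp_notify"
 * and "vp_callback" are hypothetical names; a real transport (e.g. virtio_pci
 * or virtio_mmio) supplies its own notify hook, negotiated queue size and
 * alignment.
 *
 *	struct virtqueue *vq;
 *
 *	vq = vring_new_virtqueue(num, VRING_ALIGN, vdev, true, queue_pages,
 *				 vp_notify, vp_callback, "requests");
 *	if (!vq)
 *		goto error;	// caller frees queue_pages
 */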
void vring_del_virtqueue(struct virtqueue *vq);
/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

irqreturn_t vring_interrupt(int irq, void *_vq);
#endif /* __KERNEL__ */
#endif /* _LINUX_VIRTIO_RING_H */