/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
				> 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
		/* avoid kmemleak false positive (hidden by virt_to_phys) */
		kmemleak_ignore(desc);
		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));

		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
		indirect = true;
	} else {
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
		indirect = false;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
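
/*
 * Example usage sketch for virtqueue_add_sgs() (illustrative only, kept as a
 * comment so it is not compiled): a driver that sends a header the device
 * reads plus a status byte the device writes could build its scatterlist
 * array as below.  "struct my_req" and my_queue_req() are made-up names;
 * sg_init_one() and virtqueue_add_sgs() are the real APIs.
 *
 *	static int my_queue_req(struct virtqueue *vq, struct my_req *req)
 *	{
 *		struct scatterlist hdr, status;
 *		struct scatterlist *sgs[2] = { &hdr, &status };
 *
 *		sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *		sg_init_one(&status, &req->status, sizeof(req->status));
 *
 *		// 1 readable sg list followed by 1 writable sg list
 *		return virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	}
 */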

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
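
/*
 * Example usage sketch for virtqueue_add_outbuf() (illustrative only):
 * expose one device-readable buffer and kick.  "my_send", "buf" and "len"
 * are made-up; sg_init_one(), virtqueue_add_outbuf() and virtqueue_kick()
 * are the real APIs.
 *
 *	static int my_send(struct virtqueue *vq, void *buf, unsigned int len)
 *	{
 *		struct scatterlist sg;
 *		int err;
 *
 *		sg_init_one(&sg, buf, len);
 *		err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *		if (err)
 *			return err;	// e.g. -ENOSPC when the ring is full
 *		virtqueue_kick(vq);
 *		return 0;
 *	}
 */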

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
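
/*
 * Example usage sketch for virtqueue_add_inbuf() (illustrative only): keep
 * posting device-writable receive buffers until the ring fills up (-ENOSPC),
 * then kick once.  "my_fill_rx", "my_alloc_rx_buf", "my_free_rx_buf" and
 * MY_RX_BUF_SIZE are made-up helpers; the virtqueue calls are the real APIs.
 *
 *	static void my_fill_rx(struct virtqueue *vq)
 *	{
 *		struct scatterlist sg;
 *		void *buf;
 *		int err;
 *
 *		for (;;) {
 *			buf = my_alloc_rx_buf(GFP_KERNEL);
 *			if (!buf)
 *				break;
 *			sg_init_one(&sg, buf, MY_RX_BUF_SIZE);
 *			err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
 *			if (err) {	// typically -ENOSPC
 *				my_free_rx_buf(buf);
 *				break;
 *			}
 *		}
 *		virtqueue_kick(vq);
 *	}
 */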

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added;
	new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx);
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
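
/*
 * Sketch of the split kick pattern (illustrative only): the serialized half
 * runs under the driver's own queue lock, while the possibly slow
 * notification to the host happens after the lock is dropped.  "my_dev" and
 * its vq_lock are made-up; the virtqueue calls are the real APIs.
 *
 *	spin_lock_irqsave(&my_dev->vq_lock, flags);
 *	err = virtqueue_add_outbuf(my_dev->vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(my_dev->vq);
 *	spin_unlock_irqrestore(&my_dev->vq_lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(my_dev->vq);
 */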

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
		kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));

	while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) {
		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
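
/*
 * Example usage sketch for virtqueue_get_buf() (illustrative only): drain
 * all completed buffers, using the token passed to virtqueue_add_*() to
 * find the original request.  "my_complete_req" is a made-up helper.
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		my_complete_req(token, len);	// len = bytes the device wrote
 */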

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
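
/*
 * Sketch of the prepare/poll pattern (illustrative only), in the style of
 * polling drivers that want to re-enable callbacks and then check whether
 * new used buffers slipped in meanwhile.  "my_reschedule" is a made-up
 * stand-in for rescheduling the driver's poll loop.
 *
 *	unsigned opaque = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, opaque)) {
 *		// Raced with the device: new used buffers are pending.
 *		virtqueue_disable_cb(vq);
 *		my_reschedule();
 *	}
 */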

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
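
/*
 * Sketch of a typical callback-handling loop (illustrative only): process
 * used buffers with callbacks disabled, then re-enable and re-check to
 * close the race window.  "my_vq_callback" and "my_complete_req" are
 * made-up names; the virtqueue calls are the real APIs.
 *
 *	static void my_vq_callback(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *token;
 *
 *		do {
 *			virtqueue_disable_cb(vq);
 *			while ((token = virtqueue_get_buf(vq, &len)))
 *				my_complete_req(token, len);
 *		} while (!virtqueue_enable_cb(vq));
 *	}
 */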

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
	/* TODO: tune this threshold */
	bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
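
/*
 * Sketch for virtqueue_enable_cb_delayed() (illustrative only): a driver
 * that wants fewer completion interrupts can ask for a callback only after
 * roughly 3/4 of the outstanding buffers have been used.  "my_free_old_bufs"
 * is a made-up helper that drains used buffers with virtqueue_get_buf().
 *
 *	my_free_old_bufs(vq);
 *	if (!virtqueue_enable_cb_delayed(vq)) {
 *		// Many buffers were already used; process them now.
 *		my_free_old_bufs(vq);
 *	}
 */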

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
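
/*
 * Shutdown sketch (illustrative only): after the device has been reset so
 * the queue is no longer active, reclaim any buffers the device never used.
 * kfree() stands in for whatever the driver uses to free a buffer.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 *	vdev->config->del_vqs(vdev);
 */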

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT);

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
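
/*
 * Transport-side sketch (illustrative only): a transport allocates
 * physically contiguous, zeroed ring pages sized by vring_size() and hands
 * them to vring_new_virtqueue().  "my_notify" is a made-up notify hook;
 * index, num, align, callback and name are assumed to come from the
 * transport's own configuration.
 *
 *	size_t size = PAGE_ALIGN(vring_size(num, align));
 *	void *queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
 *	struct virtqueue *vq;
 *
 *	if (!queue)
 *		return ERR_PTR(-ENOMEM);
 *	vq = vring_new_virtqueue(index, num, align, vdev, true, queue,
 *				 my_notify, callback, name);
 *	if (!vq)
 *		free_pages_exact(queue, size);
 */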

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

void *virtqueue_get_avail(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);

void *virtqueue_get_used(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);

MODULE_LICENSE("GPL");