Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull virtio update from Rusty Russell:
 "Some nice cleanups, and even a patch my wife did as a "live" demo for
  Latinoware 2012.

  There's a slightly non-trivial merge in virtio-net, as we cleaned up
  the virtio add_buf interface while DaveM accepted the mq virtio-net
  patches."

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (27 commits)
  virtio_console: Add support for remoteproc serial
  virtio_console: Merge struct buffer_token into struct port_buffer
  virtio: add drv_to_virtio to make code clearly
  virtio: use dev_to_virtio wrapper in virtio
  virtio-mmio: Fix irq parsing in command line parameter
  virtio_console: Free buffers from out-queue upon close
  virtio: Convert dev_printk(KERN_<LEVEL> to dev_<level>(
  virtio_console: Use kmalloc instead of kzalloc
  virtio_console: Free buffer if splice fails
  virtio: tools: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: scsi: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: rpmsg: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: net: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: console: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: make virtqueue_add_buf() returning 0 on success, not capacity.
  virtio: console: don't rely on virtqueue_add_buf() returning capacity.
  virtio_net: don't rely on virtqueue_add_buf() returning capacity.
  virtio-net: remove unused skb_vnet_hdr->num_sg field
  virtio-net: correct capacity math on ring full
  virtio: move queue_index and num_free fields into core struct virtqueue.
  ...
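The recurring change across the diffs below is the virtqueue_add_buf() calling convention: it now returns 0 on success (negative errno on failure) instead of the remaining ring capacity, and callers read free entries from the new num_free field in struct virtqueue. A minimal caller-side sketch of the new convention follows; submit_buf() and LOW_WATER are illustrative names for this note, not code from the tree.

/* Sketch only: submit one buffer the post-series way. */
#include <linux/virtio.h>
#include <linux/scatterlist.h>

#define LOW_WATER 4	/* illustrative threshold, not a kernel constant */

static int submit_buf(struct virtqueue *vq, void *data, unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, data, len);

	/* One out entry, no in entries; 0 on success, -ENOSPC/-ENOMEM on failure. */
	err = virtqueue_add_buf(vq, &sg, 1, 0, data, GFP_ATOMIC);
	if (err)
		return err;

	virtqueue_kick(vq);

	/* Capacity is now read from the core struct, not from the return value. */
	if (vq->num_free < LOW_WATER)
		pr_debug("virtqueue nearly full: %u entries left\n", vq->num_free);

	return 0;
}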
+230 -99
File diff suppressed because it is too large
@@ -225,7 +225,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
* eventfd (ie. the appropriate virtqueue thread)?
*/
if (!send_notify_to_eventfd(cpu)) {
-/* OK, we tell the main Laucher. */
+/* OK, we tell the main Launcher. */
if (put_user(cpu->pending_notify, user))
return -EFAULT;
return sizeof(cpu->pending_notify);
+18 -28

@@ -130,7 +130,6 @@ struct skb_vnet_hdr {
struct virtio_net_hdr hdr;
struct virtio_net_hdr_mrg_rxbuf mhdr;
};
-unsigned int num_sg;
};

struct padded_vnet_hdr {

@@ -530,10 +529,10 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
err = add_recvbuf_small(rq, gfp);

oom = err == -ENOMEM;
-if (err < 0)
+if (err)
break;
++rq->num;
-} while (err > 0);
+} while (rq->vq->num_free);
if (unlikely(rq->num > rq->max))
rq->max = rq->num;
virtqueue_kick(rq->vq);

@@ -640,10 +639,10 @@ static int virtnet_open(struct net_device *dev)
return 0;
}

-static unsigned int free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq)
{
struct sk_buff *skb;
-unsigned int len, tot_sgs = 0;
+unsigned int len;
struct virtnet_info *vi = sq->vq->vdev->priv;
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

@@ -655,10 +654,8 @@ static unsigned int free_old_xmit_skbs(struct send_queue *sq)
stats->tx_packets++;
u64_stats_update_end(&stats->tx_syncp);

-tot_sgs += skb_vnet_hdr(skb)->num_sg;
dev_kfree_skb_any(skb);
}
-return tot_sgs;
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)

@@ -666,6 +663,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
+unsigned num_sg;

pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

@@ -704,8 +702,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
else
sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);

-hdr->num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
-return virtqueue_add_buf(sq->vq, sq->sg, hdr->num_sg,
+num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
0, skb, GFP_ATOMIC);
}

@@ -714,28 +712,20 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int qnum = skb_get_queue_mapping(skb);
struct send_queue *sq = &vi->sq[qnum];
-int capacity;
+int err;

/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(sq);

/* Try to transmit */
-capacity = xmit_skb(sq, skb);
+err = xmit_skb(sq, skb);

-/* This can happen with OOM and indirect buffers. */
-if (unlikely(capacity < 0)) {
-if (likely(capacity == -ENOMEM)) {
-if (net_ratelimit())
-dev_warn(&dev->dev,
-"TXQ (%d) failure: out of memory\n",
-qnum);
-} else {
-dev->stats.tx_fifo_errors++;
-if (net_ratelimit())
-dev_warn(&dev->dev,
-"Unexpected TXQ (%d) failure: %d\n",
-qnum, capacity);
-}
+/* This should not happen! */
+if (unlikely(err)) {
+dev->stats.tx_fifo_errors++;
+if (net_ratelimit())
+dev_warn(&dev->dev,
+"Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;

@@ -748,12 +738,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)

/* Apparently nice girls don't return TX_BUSY; stop the queue
* before it gets out of hand. Naturally, this wastes entries. */
-if (capacity < 2+MAX_SKB_FRAGS) {
+if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
netif_stop_subqueue(dev, qnum);
if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
-capacity += free_old_xmit_skbs(sq);
-if (capacity >= 2+MAX_SKB_FRAGS) {
+free_old_xmit_skbs(sq);
+if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
}

@@ -764,7 +764,7 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,

/* add message to the remote processor's virtqueue */
err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
-if (err < 0) {
+if (err) {
/*
* need to reclaim the buffer here, otherwise it's lost
* (memory won't leak, but rpmsg won't use it again for TX).

@@ -776,8 +776,6 @@ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,

/* tell the remote processor it has a pending message to read */
virtqueue_kick(vrp->svq);

-err = 0;
out:
mutex_unlock(&vrp->tx_lock);
return err;

@@ -980,7 +978,7 @@ static int rpmsg_probe(struct virtio_device *vdev)

err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
GFP_KERNEL);
-WARN_ON(err < 0); /* sanity check; this can't really happen */
+WARN_ON(err); /* sanity check; this can't really happen */
}

/* suppress "tx-complete" interrupts */
+13 -11

@@ -215,7 +215,7 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
struct virtio_scsi_event_node *event_node)
{
-int ret;
+int err;
struct scatterlist sg;
unsigned long flags;

@@ -223,13 +223,14 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,

spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

-ret = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC);
-if (ret >= 0)
+err = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node,
+GFP_ATOMIC);
+if (!err)
virtqueue_kick(vscsi->event_vq.vq);

spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

-return ret;
+return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)

@@ -410,22 +411,23 @@ static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
{
unsigned int out_num, in_num;
unsigned long flags;
-int ret;
+int err;
+bool needs_kick = false;

spin_lock_irqsave(&tgt->tgt_lock, flags);
virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);

spin_lock(&vq->vq_lock);
-ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
+err = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
spin_unlock(&tgt->tgt_lock);
-if (ret >= 0)
-ret = virtqueue_kick_prepare(vq->vq);
+if (!err)
+needs_kick = virtqueue_kick_prepare(vq->vq);

spin_unlock_irqrestore(&vq->vq_lock, flags);

-if (ret > 0)
+if (needs_kick)
virtqueue_notify(vq->vq);
-return ret;
+return err;
}

static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)

@@ -467,7 +469,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)

if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
-GFP_ATOMIC) >= 0)
+GFP_ATOMIC) == 0)
ret = 0;
else
mempool_free(cmd, virtscsi_cmd_pool);
+13 -17

@@ -10,33 +10,32 @@ static DEFINE_IDA(virtio_index_ida);
static ssize_t device_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
-struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.device);
}
static ssize_t vendor_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
-struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%04x\n", dev->id.vendor);
}
static ssize_t status_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
-struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
+struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
}
static ssize_t modalias_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
-struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-
+struct virtio_device *dev = dev_to_virtio(_d);
return sprintf(buf, "virtio:d%08Xv%08X\n",
dev->id.device, dev->id.vendor);
}
static ssize_t features_show(struct device *_d,
struct device_attribute *attr, char *buf)
{
-struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+struct virtio_device *dev = dev_to_virtio(_d);
unsigned int i;
ssize_t len = 0;

@@ -71,10 +70,10 @@ static inline int virtio_id_match(const struct virtio_device *dev,
static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
{
unsigned int i;
-struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+struct virtio_device *dev = dev_to_virtio(_dv);
const struct virtio_device_id *ids;

-ids = container_of(_dr, struct virtio_driver, driver)->id_table;
+ids = drv_to_virtio(_dr)->id_table;
for (i = 0; ids[i].device; i++)
if (virtio_id_match(dev, &ids[i]))
return 1;

@@ -83,7 +82,7 @@ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)

static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
{
-struct virtio_device *dev = container_of(_dv,struct virtio_device,dev);
+struct virtio_device *dev = dev_to_virtio(_dv);

return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
dev->id.device, dev->id.vendor);

@@ -98,8 +97,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
unsigned int fbit)
{
unsigned int i;
-struct virtio_driver *drv = container_of(vdev->dev.driver,
-struct virtio_driver, driver);
+struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);

for (i = 0; i < drv->feature_table_size; i++)
if (drv->feature_table[i] == fbit)

@@ -111,9 +109,8 @@ EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);
static int virtio_dev_probe(struct device *_d)
{
int err, i;
-struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-struct virtio_driver *drv = container_of(dev->dev.driver,
-struct virtio_driver, driver);
+struct virtio_device *dev = dev_to_virtio(_d);
+struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
u32 device_features;

/* We have a driver! */

@@ -152,9 +149,8 @@ static int virtio_dev_probe(struct device *_d)

static int virtio_dev_remove(struct device *_d)
{
-struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
-struct virtio_driver *drv = container_of(dev->dev.driver,
-struct virtio_driver, driver);
+struct virtio_device *dev = dev_to_virtio(_d);
+struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);

drv->remove(dev);
@@ -139,10 +139,9 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
struct page *page = balloon_page_enqueue(vb_dev_info);

if (!page) {
-if (printk_ratelimit())
-dev_printk(KERN_INFO, &vb->vdev->dev,
-"Out of puff! Can't get %u pages\n",
-VIRTIO_BALLOON_PAGES_PER_PAGE);
+dev_info_ratelimited(&vb->vdev->dev,
+"Out of puff! Can't get %u pages\n",
+VIRTIO_BALLOON_PAGES_PER_PAGE);
/* Sleep for at least 1/5 of a second before retry. */
msleep(200);
break;
@@ -225,7 +225,7 @@ static void vm_notify(struct virtqueue *vq)

/* We write the queue's selector into the notification register to
* signal the other end */
-writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
}

/* Notify all virtqueues on an interrupt. */

@@ -266,7 +266,7 @@ static void vm_del_vq(struct virtqueue *vq)
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
struct virtio_mmio_vq_info *info = vq->priv;
unsigned long flags, size;
-unsigned int index = virtqueue_get_queue_index(vq);
+unsigned int index = vq->index;

spin_lock_irqsave(&vm_dev->lock, flags);
list_del(&info->node);

@@ -521,25 +521,33 @@ static int vm_cmdline_set(const char *device,
int err;
struct resource resources[2] = {};
char *str;
-long long int base;
+long long int base, size;
+unsigned int irq;
int processed, consumed = 0;
struct platform_device *pdev;

-resources[0].flags = IORESOURCE_MEM;
-resources[1].flags = IORESOURCE_IRQ;
-
-resources[0].end = memparse(device, &str) - 1;
+/* Consume "size" part of the command line parameter */
+size = memparse(device, &str);

/* Get "@<base>:<irq>[:<id>]" chunks */
processed = sscanf(str, "@%lli:%u%n:%d%n",
-&base, &resources[1].start, &consumed,
+&base, &irq, &consumed,
&vm_cmdline_id, &consumed);

-if (processed < 2 || processed > 3 || str[consumed])
+/*
+* sscanf() must processes at least 2 chunks; also there
+* must be no extra characters after the last chunk, so
+* str[consumed] must be '\0'
+*/
+if (processed < 2 || str[consumed])
return -EINVAL;

+resources[0].flags = IORESOURCE_MEM;
resources[0].start = base;
-resources[0].end += base;
-resources[1].end = resources[1].start;
+resources[0].end = base + size - 1;
+
+resources[1].flags = IORESOURCE_IRQ;
+resources[1].start = resources[1].end = irq;

if (!vm_cmdline_parent_registered) {
err = device_register(&vm_cmdline_parent);
@@ -203,8 +203,7 @@ static void vp_notify(struct virtqueue *vq)

/* we write the queue's selector into the notification register to
* signal the other end */
-iowrite16(virtqueue_get_queue_index(vq),
-vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
}

/* Handle a configuration change: Tell driver if it wants to know. */

@@ -479,8 +478,7 @@ static void vp_del_vq(struct virtqueue *vq)
list_del(&info->node);
spin_unlock_irqrestore(&vp_dev->lock, flags);

-iowrite16(virtqueue_get_queue_index(vq),
-vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

if (vp_dev->msix_enabled) {
iowrite16(VIRTIO_MSI_NO_VECTOR,

@@ -830,16 +828,4 @@ static struct pci_driver virtio_pci_driver = {
#endif
};

-static int __init virtio_pci_init(void)
-{
-return pci_register_driver(&virtio_pci_driver);
-}
-
-module_init(virtio_pci_init);
-
-static void __exit virtio_pci_exit(void)
-{
-pci_unregister_driver(&virtio_pci_driver);
-}
-
-module_exit(virtio_pci_exit);
+module_pci_driver(virtio_pci_driver);
@@ -93,8 +93,6 @@ struct vring_virtqueue
/* Host publishes avail event idx */
bool event;

-/* Number of free buffers */
-unsigned int num_free;
/* Head of free buffer list. */
unsigned int free_head;
/* Number we've added since last sync. */

@@ -106,9 +104,6 @@ struct vring_virtqueue
/* How to notify other side. FIXME: commonalize hcalls! */
void (*notify)(struct virtqueue *vq);

-/* Index of the queue */
-int queue_index;
-
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;

@@ -135,6 +130,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
unsigned head;
int i;

+/*
+* We require lowmem mappings for the descriptors because
+* otherwise virt_to_phys will give us bogus addresses in the
+* virtqueue.
+*/
+gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
+
desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
if (!desc)
return -ENOMEM;

@@ -160,7 +162,7 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
desc[i-1].next = 0;

/* We're about to use a buffer */
-vq->num_free--;
+vq->vq.num_free--;

/* Use a single buffer which doesn't continue */
head = vq->free_head;

@@ -174,13 +176,6 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}

-int virtqueue_get_queue_index(struct virtqueue *_vq)
-{
-struct vring_virtqueue *vq = to_vvq(_vq);
-return vq->queue_index;
-}
-EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
-
/**
* virtqueue_add_buf - expose buffer to other end
* @vq: the struct virtqueue we're talking about.

@@ -193,10 +188,7 @@ EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
-* Returns remaining capacity of queue or a negative error
-* (ie. ENOSPC). Note that it only really makes sense to treat all
-* positive return values as "available": indirect buffers mean that
-* we can put an entire sg[] array inside a single queue entry.
+* Returns zero or a negative error (ie. ENOSPC, ENOMEM).
*/
int virtqueue_add_buf(struct virtqueue *_vq,
struct scatterlist sg[],

@@ -228,7 +220,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,

/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
-if (vq->indirect && (out + in) > 1 && vq->num_free) {
+if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
head = vring_add_indirect(vq, sg, out, in, gfp);
if (likely(head >= 0))
goto add_head;

@@ -237,9 +229,9 @@ int virtqueue_add_buf(struct virtqueue *_vq,
BUG_ON(out + in > vq->vring.num);
BUG_ON(out + in == 0);

-if (vq->num_free < out + in) {
+if (vq->vq.num_free < out + in) {
pr_debug("Can't add buf len %i - avail = %i\n",
-out + in, vq->num_free);
+out + in, vq->vq.num_free);
/* FIXME: for historical reasons, we force a notify here if
* there are outgoing parts to the buffer. Presumably the
* host should service the ring ASAP. */

@@ -250,7 +242,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
}

/* We're about to use some buffers from the free list. */
-vq->num_free -= out + in;
+vq->vq.num_free -= out + in;

head = vq->free_head;
for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {

@@ -296,7 +288,7 @@ add_head:
pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);

-return vq->num_free;
+return 0;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);

@@ -393,13 +385,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)

while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
i = vq->vring.desc[i].next;
-vq->num_free++;
+vq->vq.num_free++;
}

vq->vring.desc[i].next = vq->free_head;
vq->free_head = head;
/* Plus final descriptor */
-vq->num_free++;
+vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)

@@ -599,7 +591,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
return buf;
}
/* That should have freed everything. */
-BUG_ON(vq->num_free != vq->vring.num);
+BUG_ON(vq->vq.num_free != vq->vring.num);

END_USE(vq);
return NULL;

@@ -653,12 +645,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
+vq->vq.num_free = num;
+vq->vq.index = index;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
-vq->queue_index = index;
list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;

@@ -673,7 +666,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

/* Put everything in free lists. */
-vq->num_free = num;
vq->free_head = 0;
for (i = 0; i < num-1; i++) {
vq->vring.desc[i].next = i+1;
+23 -2

@@ -16,12 +16,20 @@
* @name: the name of this virtqueue (mainly for debugging)
* @vdev: the virtio device this queue was created for.
* @priv: a pointer for the virtqueue implementation to use.
+* @index: the zero-based ordinal number for this queue.
+* @num_free: number of elements we expect to be able to fit.
+*
+* A note on @num_free: with indirect buffers, each buffer needs one
+* element in the queue, otherwise a buffer will need one element per
+* sg element.
*/
struct virtqueue {
struct list_head list;
void (*callback)(struct virtqueue *vq);
const char *name;
struct virtio_device *vdev;
+unsigned int index;
+unsigned int num_free;
void *priv;
};

@@ -50,7 +58,11 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq);

unsigned int virtqueue_get_vring_size(struct virtqueue *vq);

-int virtqueue_get_queue_index(struct virtqueue *vq);
+/* FIXME: Obsolete accessor, but required for virtio_net merge. */
+static inline unsigned int virtqueue_get_queue_index(struct virtqueue *vq)
+{
+return vq->index;
+}

/**
* virtio_device - representation of a device using virtio

@@ -73,7 +85,11 @@ struct virtio_device {
void *priv;
};

-#define dev_to_virtio(dev) container_of(dev, struct virtio_device, dev)
+static inline struct virtio_device *dev_to_virtio(struct device *_dev)
+{
+return container_of(_dev, struct virtio_device, dev);
+}

int register_virtio_device(struct virtio_device *dev);
void unregister_virtio_device(struct virtio_device *dev);

@@ -103,6 +119,11 @@ struct virtio_driver {
#endif
};

+static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
+{
+return container_of(drv, struct virtio_driver, driver);
+}
+
int register_virtio_driver(struct virtio_driver *drv);
void unregister_virtio_driver(struct virtio_driver *drv);
#endif /* _LINUX_VIRTIO_H */
@@ -1,7 +1,31 @@
+/*
+* This header is BSD licensed so anyone can use the definitions to implement
+* compatible drivers/servers.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+* 1. Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* 2. Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in the
+* documentation and/or other materials provided with the distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+* SUCH DAMAGE.
+*/
+
#ifndef _LINUX_VIRTIO_SCSI_H
#define _LINUX_VIRTIO_SCSI_H
-/* This header is BSD licensed so anyone can use the definitions to implement
-* compatible drivers/servers. */

#define VIRTIO_SCSI_CDB_SIZE 32
#define VIRTIO_SCSI_SENSE_SIZE 96
@@ -37,5 +37,6 @@
#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */
#define VIRTIO_ID_SCSI 8 /* virtio scsi */
#define VIRTIO_ID_9P 9 /* 9p virtio console */
+#define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */

#endif /* _LINUX_VIRTIO_IDS_H */
@@ -105,6 +105,7 @@ struct page *kmap_to_page(void *vaddr)

return virt_to_page(addr);
}
+EXPORT_SYMBOL(kmap_to_page);

static void flush_all_zero_pkmaps(void)
{
@@ -39,6 +39,7 @@
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
+#include <linux/highmem.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <linux/parser.h>

@@ -325,7 +326,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
int count = nr_pages;
while (nr_pages) {
s = rest_of_page(data);
-pages[index++] = virt_to_page(data);
+pages[index++] = kmap_to_page(data);
data += s;
nr_pages--;
}
+35 -49

@@ -179,29 +179,6 @@ static struct termios orig_term;
#define wmb() __asm__ __volatile__("" : : : "memory")
#define mb() __asm__ __volatile__("" : : : "memory")

-/*
-* Convert an iovec element to the given type.
-*
-* This is a fairly ugly trick: we need to know the size of the type and
-* alignment requirement to check the pointer is kosher. It's also nice to
-* have the name of the type in case we report failure.
-*
-* Typing those three things all the time is cumbersome and error prone, so we
-* have a macro which sets them all up and passes to the real function.
-*/
-#define convert(iov, type) \
-((type *)_convert((iov), sizeof(type), __alignof__(type), #type))
-
-static void *_convert(struct iovec *iov, size_t size, size_t align,
-const char *name)
-{
-if (iov->iov_len != size)
-errx(1, "Bad iovec size %zu for %s", iov->iov_len, name);
-if ((unsigned long)iov->iov_base % align != 0)
-errx(1, "Bad alignment %p for %s", iov->iov_base, name);
-return iov->iov_base;
-}
-
/* Wrapper for the last available index. Makes it easier to change. */
#define lg_last_avail(vq) ((vq)->last_avail_idx)

@@ -228,7 +205,8 @@ static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
}

/* Take len bytes from the front of this iovec. */
-static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len)
+static void iov_consume(struct iovec iov[], unsigned num_iov,
+void *dest, unsigned len)
{
unsigned int i;

@@ -236,11 +214,16 @@ static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len)
unsigned int used;

used = iov[i].iov_len < len ? iov[i].iov_len : len;
+if (dest) {
+memcpy(dest, iov[i].iov_base, used);
+dest += used;
+}
iov[i].iov_base += used;
iov[i].iov_len -= used;
len -= used;
}
-assert(len == 0);
+if (len != 0)
+errx(1, "iovec too short!");
}

/* The device virtqueue descriptors are followed by feature bitmasks. */

@@ -864,7 +847,7 @@ static void console_output(struct virtqueue *vq)
warn("Write to stdout gave %i (%d)", len, errno);
break;
}
-iov_consume(iov, out, len);
+iov_consume(iov, out, NULL, len);
}

/*

@@ -1591,9 +1574,9 @@ static void blk_request(struct virtqueue *vq)
{
struct vblk_info *vblk = vq->dev->priv;
unsigned int head, out_num, in_num, wlen;
-int ret;
+int ret, i;
u8 *in;
-struct virtio_blk_outhdr *out;
+struct virtio_blk_outhdr out;
struct iovec iov[vq->vring.num];
off64_t off;

@@ -1603,32 +1586,36 @@ static void blk_request(struct virtqueue *vq)
*/
head = wait_for_vq_desc(vq, iov, &out_num, &in_num);

-/*
-* Every block request should contain at least one output buffer
-* (detailing the location on disk and the type of request) and one
-* input buffer (to hold the result).
-*/
-if (out_num == 0 || in_num == 0)
-errx(1, "Bad virtblk cmd %u out=%u in=%u",
-head, out_num, in_num);
+/* Copy the output header from the front of the iov (adjusts iov) */
+iov_consume(iov, out_num, &out, sizeof(out));

+/* Find and trim end of iov input array, for our status byte. */
+in = NULL;
+for (i = out_num + in_num - 1; i >= out_num; i--) {
+if (iov[i].iov_len > 0) {
+in = iov[i].iov_base + iov[i].iov_len - 1;
+iov[i].iov_len--;
+break;
+}
+}
+if (!in)
+errx(1, "Bad virtblk cmd with no room for status");

-out = convert(&iov[0], struct virtio_blk_outhdr);
-in = convert(&iov[out_num+in_num-1], u8);
/*
* For historical reasons, block operations are expressed in 512 byte
* "sectors".
*/
-off = out->sector * 512;
+off = out.sector * 512;

/*
* In general the virtio block driver is allowed to try SCSI commands.
* It'd be nice if we supported eject, for example, but we don't.
*/
-if (out->type & VIRTIO_BLK_T_SCSI_CMD) {
+if (out.type & VIRTIO_BLK_T_SCSI_CMD) {
fprintf(stderr, "Scsi commands unsupported\n");
*in = VIRTIO_BLK_S_UNSUPP;
wlen = sizeof(*in);
-} else if (out->type & VIRTIO_BLK_T_OUT) {
+} else if (out.type & VIRTIO_BLK_T_OUT) {
/*
* Write
*

@@ -1636,10 +1623,10 @@ static void blk_request(struct virtqueue *vq)
* if they try to write past end.
*/
if (lseek64(vblk->fd, off, SEEK_SET) != off)
-err(1, "Bad seek to sector %llu", out->sector);
+err(1, "Bad seek to sector %llu", out.sector);

-ret = writev(vblk->fd, iov+1, out_num-1);
-verbose("WRITE to sector %llu: %i\n", out->sector, ret);
+ret = writev(vblk->fd, iov, out_num);
+verbose("WRITE to sector %llu: %i\n", out.sector, ret);

/*
* Grr... Now we know how long the descriptor they sent was, we

@@ -1655,7 +1642,7 @@ static void blk_request(struct virtqueue *vq)

wlen = sizeof(*in);
*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
-} else if (out->type & VIRTIO_BLK_T_FLUSH) {
+} else if (out.type & VIRTIO_BLK_T_FLUSH) {
/* Flush */
ret = fdatasync(vblk->fd);
verbose("FLUSH fdatasync: %i\n", ret);

@@ -1669,10 +1656,9 @@ static void blk_request(struct virtqueue *vq)
* if they try to read past end.
*/
if (lseek64(vblk->fd, off, SEEK_SET) != off)
-err(1, "Bad seek to sector %llu", out->sector);
+err(1, "Bad seek to sector %llu", out.sector);

-ret = readv(vblk->fd, iov+1, in_num-1);
-verbose("READ from sector %llu: %i\n", out->sector, ret);
+ret = readv(vblk->fd, iov + out_num, in_num);
if (ret >= 0) {
wlen = sizeof(*in) + ret;
*in = VIRTIO_BLK_S_OK;

@@ -1758,7 +1744,7 @@ static void rng_input(struct virtqueue *vq)
len = readv(rng_info->rfd, iov, in_num);
if (len <= 0)
err(1, "Read from /dev/random gave %i", len);
-iov_consume(iov, in_num, len);
+iov_consume(iov, in_num, NULL, len);
totlen += len;
}
@@ -164,7 +164,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
r = virtqueue_add_buf(vq->vq, &sl, 1, 0,
dev->buf + started,
GFP_ATOMIC);
-if (likely(r >= 0)) {
+if (likely(r == 0)) {
++started;
virtqueue_kick(vq->vq);
}

@@ -177,7 +177,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
r = 0;
}

-} while (r >= 0);
+} while (r == 0);
if (completed == completed_before)
++spurious;
assert(completed <= bufs);