
netdev-dpdk: vhost: Fix txq enabling in the absence of notifications.

According to the QEMU documentation (docs/specs/vhost-user.txt), one
queue should be enabled initially. More queues are enabled
dynamically, by sending the VHOST_USER_SET_VRING_ENABLE message.
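
For illustration, a backend that follows the spec keeps per-queue
enable state along these lines (a minimal sketch with hypothetical
names, not the actual OVS or QEMU data structures):

    #include <stdbool.h>

    #define MAX_QUEUES 1024

    static bool queue_enabled[MAX_QUEUES];

    static void
    backend_init(void)
    {
        /* Per the spec, queue 0 must be usable before any
         * VHOST_USER_SET_VRING_ENABLE message is received. */
        queue_enabled[0] = true;
    }

    /* Invoked when VHOST_USER_SET_VRING_ENABLE arrives for a queue. */
    static void
    vring_enable_changed(int qid, bool enable)
    {
        queue_enabled[qid] = enable;
    }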

Currently, all queues in OVS are disabled by default, which violates
the specification above. Queue #0 should therefore be enabled by
default, both to support QEMU versions older than 2.5 and to avoid
problems when QEMU does not send VHOST_USER_SET_VRING_ENABLE for
queue #0 as the documentation describes. This also fixes the
currently broken vhost-cuse support in OVS.
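
The change below thus boils down to a three-state tx queue map; a
sketch of the intended semantics (constants as in the patch, the
helper name is illustrative only):

    #define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* No notification seen yet. */
    #define OVS_VHOST_QUEUE_DISABLED    (-2) /* Explicitly disabled by guest. */

    /* On device setup, queue 0 becomes usable even if the guest never
     * sends VHOST_USER_SET_VRING_ENABLE for it; an explicit disable is
     * still respected. */
    static void
    enable_default_txq(int *map)
    {
        if (map[0] == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
            map[0] = 0;
        }
    }

On the send path a single "qid < 0" check then covers both negative
states, so packets are dropped whether the queue is unmapped or
explicitly disabled.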

Fixes: 585a5beaa2 ("netdev-dpdk: vhost-user: Fix sending packets to queues not enabled by guest.")
Reported-by: Mauricio Vasquez B <mauricio.vasquezbernal@studenti.polito.it>
Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
Acked-by: Flavio Leitner <fbl@sysclose.org>
Acked-by: Daniele Di Proietto <diproiettod@vmware.com>
Author:    Ilya Maximets <i.maximets@samsung.com>
Date:      2016-03-29 09:20:41 +03:00
Committer: Daniele Di Proietto <diproiettod@vmware.com>
Commit:    f3ea2ad27f (parent 910596583d)

--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -103,6 +103,9 @@ BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
 #define NIC_PORT_TX_Q_SIZE 2048 /* Size of Physical NIC TX Queue, Max (n+32<=4096)*/
 
 #define OVS_VHOST_MAX_QUEUE_NUM 1024 /* Maximum number of vHost TX queues. */
+#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
+#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
+                                          * yet mapped to another queue. */
 
 static char *cuse_dev_name = NULL;  /* Character device cuse_dev_name. */
 static char *vhost_sock_dir = NULL; /* Location of vhost-user sockets */
@@ -671,7 +674,7 @@ netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
         }
 
         /* Initialize map for vhost devices. */
-        netdev->tx_q[i].map = -1;
+        netdev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
         rte_spinlock_init(&netdev->tx_q[i].tx_lock);
     }
 }
@@ -1265,7 +1268,7 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 
     qid = vhost_dev->tx_q[qid % vhost_dev->real_n_txq].map;
 
-    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid == -1)) {
+    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid < 0)) {
         rte_spinlock_lock(&vhost_dev->stats_lock);
         vhost_dev->stats.tx_dropped+= cnt;
         rte_spinlock_unlock(&vhost_dev->stats_lock);
@@ -2019,7 +2022,7 @@ netdev_dpdk_remap_txqs(struct netdev_dpdk *netdev)
     }
 
     if (n_enabled == 0 && total_txqs != 0) {
-        enabled_queues[0] = -1;
+        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
         n_enabled = 1;
     }
 
@@ -2056,6 +2059,10 @@ netdev_dpdk_vhost_set_queues(struct netdev_dpdk *netdev, struct virtio_net *dev)
     netdev->real_n_rxq = qp_num;
     netdev->real_n_txq = qp_num;
     netdev->txq_needs_locking = true;
+    /* Enable TX queue 0 by default if it wasn't disabled. */
+    if (netdev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
+        netdev->tx_q[0].map = 0;
+    }
 
     netdev_dpdk_remap_txqs(netdev);
 
@@ -2104,6 +2111,18 @@ new_device(struct virtio_net *dev)
     return 0;
 }
 
+/* Clears mapping for all available queues of vhost interface. */
+static void
+netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
+    OVS_REQUIRES(dev->mutex)
+{
+    int i;
+
+    for (i = 0; i < dev->real_n_txq; i++) {
+        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
+    }
+}
+
 /*
  * Remove a virtio-net device from the specific vhost port. Use dev->remove
  * flag to stop any more packets from being sent or received to/from a VM and
@@ -2123,6 +2142,7 @@ destroy_device(volatile struct virtio_net *dev)
             ovs_mutex_lock(&vhost_dev->mutex);
             dev->flags &= ~VIRTIO_DEV_RUNNING;
             ovsrcu_set(&vhost_dev->virtio_dev, NULL);
+            netdev_dpdk_txq_map_clear(vhost_dev);
             exists = true;
             ovs_mutex_unlock(&vhost_dev->mutex);
             break;
@@ -2169,7 +2189,7 @@ vring_state_changed(struct virtio_net *dev, uint16_t queue_id, int enable)
            if (enable) {
                vhost_dev->tx_q[qid].map = qid;
            } else {
-               vhost_dev->tx_q[qid].map = -1;
+               vhost_dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
            }
            netdev_dpdk_remap_txqs(vhost_dev);
            exists = true;