mirror of
https://github.com/openvswitch/ovs
synced 2025-08-22 01:51:26 +00:00
netdev-dpdk: Delay vhost mempool creation.
netdev-dpdk: Delay vhost mempool creation.
Currently mempools for vhost are being assigned before the vhost device is added. In some cases this may be just reusing an existing mempool but in others it can require creation of a mempool. For multi-NUMA, the NUMA info of the vhost port is not known until a device is added to the port, so on multi-NUMA systems the initial NUMA node for the mempool is a best guess based on vswitchd affinity. When a device is added to the vhost port, the NUMA info can be checked and, if the guess was incorrect, a mempool on the correct NUMA node created. For multi-NUMA, the current scheme can have the effect of creating a mempool on a NUMA node that will not be needed, and at least for a certain time period it requires more memory on that NUMA node. It also makes it difficult for a user trying to provision memory on different NUMA nodes, if they are not sure which NUMA node the initial mempool for a vhost port will be on. For single NUMA, even though the mempool will be on the correct NUMA node, it is assigned ahead of time, and if a vhost device was never added it could be holding unneeded memory. This patch delays the creation of the mempool for a vhost port until the vhost device is added. Signed-off-by: Kevin Traynor <ktraynor@redhat.com> Reviewed-by: David Marchand <david.marchand@redhat.com> Signed-off-by: Ian Stokes <ian.stokes@intel.com>
This commit is contained in:
parent
b80f58cde2
commit
0dd409c2a2
3
NEWS
3
NEWS
@ -39,6 +39,9 @@ Post-v2.17.0
|
||||
- DPDK:
|
||||
* OVS validated with DPDK 21.11.1. It is recommended to use this version
|
||||
until further releases.
|
||||
* Delay creating or reusing a mempool for vhost ports until the VM
|
||||
is started. A failure to create a mempool will now be logged only
|
||||
when the VM is started.
|
||||
- Userspace datapath:
|
||||
* 'dpif-netdev/subtable-lookup-prio-get' appctl command renamed to
|
||||
'dpif-netdev/subtable-lookup-info-get' to better reflect its purpose.
|
||||
|
@ -3926,7 +3926,8 @@ new_device(int vid)
|
||||
|
||||
if (dev->requested_n_txq < qp_num
|
||||
|| dev->requested_n_rxq < qp_num
|
||||
|| dev->requested_socket_id != newnode) {
|
||||
|| dev->requested_socket_id != newnode
|
||||
|| dev->dpdk_mp == NULL) {
|
||||
dev->requested_socket_id = newnode;
|
||||
dev->requested_n_rxq = qp_num;
|
||||
dev->requested_n_txq = qp_num;
|
||||
@ -4976,7 +4977,6 @@ dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
|
||||
{
|
||||
dev->up.n_txq = dev->requested_n_txq;
|
||||
dev->up.n_rxq = dev->requested_n_rxq;
|
||||
int err;
|
||||
|
||||
/* Always keep RX queue 0 enabled for implementations that won't
|
||||
* report vring states. */
|
||||
@ -4994,14 +4994,17 @@ dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
|
||||
|
||||
netdev_dpdk_remap_txqs(dev);
|
||||
|
||||
err = netdev_dpdk_mempool_configure(dev);
|
||||
if (!err) {
|
||||
/* A new mempool was created or re-used. */
|
||||
netdev_change_seq_changed(&dev->up);
|
||||
} else if (err != EEXIST) {
|
||||
return err;
|
||||
}
|
||||
if (netdev_dpdk_get_vid(dev) >= 0) {
|
||||
int err;
|
||||
|
||||
err = netdev_dpdk_mempool_configure(dev);
|
||||
if (!err) {
|
||||
/* A new mempool was created or re-used. */
|
||||
netdev_change_seq_changed(&dev->up);
|
||||
} else if (err != EEXIST) {
|
||||
return err;
|
||||
}
|
||||
|
||||
if (dev->vhost_reconfigured == false) {
|
||||
dev->vhost_reconfigured = true;
|
||||
/* Carrier status may need updating. */
|
||||
|
Loading…
x
Reference in New Issue
Block a user