AT_BANNER([PMD])
m4_divert_push([PREPARE_TESTS])
# Given the output of `ovs-appctl dpif-netdev/pmd-rxq-show`, prints a list
# of every rxq (one per line) in the form:
# port_name rxq_id numa_id core_id
parse_pmd_rxq_show () {
awk '/pmd thread/ {numa=$4; core=substr($6, 1, length($6) - 1)} /^ port:/ {print $2, $4, numa, core}' | sort
}
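# Example: the thread header "pmd thread numa_id 0 core_id 1:" followed by a
# queue line "port: p1 queue-id: 0 (enabled) pmd usage: 0 %" is printed as
# "p1 0 0 1".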
# Given the output of `ovs-appctl dpif-netdev/pmd-rxq-show`,
# and with queues for each core on one line, prints the rxqs
# of the core on one line
# 'port:' port_name 'queue_id:' rxq_id rxq_id rxq_id rxq_id
parse_pmd_rxq_show_group () {
awk '/port:/ {print $1, $2, $3, $4, $13, $22, $31}'
}
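# Each queue entry on a joined line is 9 fields wide ("port: <name>
# queue-id: <id> (enabled) pmd usage: <n> %"), so fields 4, 13, 22 and 31
# hold the queue ids of up to four queues, giving e.g.:
#   port: p0 queue-id: 0 3 4 7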
# Given the output of `ovs-appctl dpctl/dump-flows`, prints a list of flows
# (one per line), with the pmd_id at the beginning of the line
#
flow_dump_prepend_pmd () {
awk '/flow-dump from non-dpdk/ {pmd_id=-1; next} /flow-dump from pmd/ {pmd_id=$7; next} {print pmd_id, $0}' | sort
}
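# Example: flows dumped after the header "flow-dump from pmd on cpu core: 0"
# are printed as "0 <flow>"; flows from the non-pmd dump get pmd_id -1.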
m4_divert_pop([PREPARE_TESTS])
dnl CHECK_CPU_DISCOVERED([n_cpu])
dnl
dnl Waits until CPUs are discovered and checks that the number of discovered
dnl CPUs is greater than or equal to 'n_cpu'. Without a parameter, checks
dnl that at least one CPU was discovered.
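dnl Matches vswitchd log lines like "Discovered 1 NUMA nodes and 4 CPU cores".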
m4_define([CHECK_CPU_DISCOVERED], [
PATTERN="Discovered [[0-9]]* NUMA nodes and [[0-9]]* CPU cores"
OVS_WAIT_UNTIL([grep "$PATTERN" ovs-vswitchd.log])
N_CPU=$(grep "$PATTERN" ovs-vswitchd.log | sed -e 's/.* \([[0-9]]*\) CPU cores/\1/')
if [[ -z "$1" ]]
then AT_CHECK([test "$N_CPU" -gt "0"])
else AT_SKIP_IF([test "$N_CPU" -lt "$1"])
fi
])
dnl CHECK_PMD_THREADS_CREATED([n_threads], [numa_id], [+line])
dnl
dnl Waits for creation of 'n_threads', or of at least 1 thread if $1 is not
dnl passed. Checking starts from line number 'line' in ovs-vswitchd.log.
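dnl Matches vswitchd log lines like "There are 2 pmd threads on numa node 0".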
m4_define([CHECK_PMD_THREADS_CREATED], [
PATTERN="There are [[0-9]]* pmd threads on numa node $2"
line_st=$3
if [[ -z "$line_st" ]]
then
line_st="+0"
fi
OVS_WAIT_UNTIL([tail -n $line_st ovs-vswitchd.log | grep "$PATTERN"])
N_THREADS=$(tail -n $line_st ovs-vswitchd.log | grep "$PATTERN" | tail -1 | sed -e 's/.* \([[0-9]]*\) pmd .*/\1/')
if [[ -z "$1" ]]
then AT_CHECK([test "$N_THREADS" -gt 0])
else AT_CHECK([test "$N_THREADS" -eq "$1"])
fi
])
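dnl SED_NUMA_CORE_PATTERN clears the actual numa and core ids in
dnl "pmd thread numa_id X core_id Y:" headers so that the expected output
dnl does not depend on the machine running the tests.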
m4_define([SED_NUMA_CORE_PATTERN], ["s/\(numa_id \)[[0-9]]*\( core_id \)[[0-9]]*:/\1<cleared>\2<cleared>:/"])
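dnl DUMMY_NUMA simulates a system with four CPU cores, all on NUMA node 0.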
m4_define([DUMMY_NUMA], [--dummy-numa="0,0,0,0"])
AT_SETUP([PMD - creating a thread/add-port])
OVS_VSWITCHD_START([add-port br0 p0 -- set Interface p0 type=dummy-pmd], [], [], [DUMMY_NUMA])
CHECK_CPU_DISCOVERED()
CHECK_PMD_THREADS_CREATED()
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed SED_NUMA_CORE_PATTERN], [0], [dnl
pmd thread numa_id <cleared> core_id <cleared>:
isolated : false
port: p0 queue-id: 0 (enabled) pmd usage: NOT AVAIL
])
AT_CHECK([ovs-appctl dpif/show | sed 's/\(tx_queues=\)[[0-9]]*/\1<cleared>/g'], [0], [dnl
dummy@ovs-dummy: hit:0 missed:0
br0:
br0 65534/100: (dummy-internal)
p0 1/1: (dummy-pmd: configured_rx_queues=1, configured_tx_queues=<cleared>, requested_rx_queues=1, requested_tx_queues=<cleared>)
])
OVS_VSWITCHD_STOP
AT_CLEANUP
AT_SETUP([PMD - multiqueue support])
OVS_VSWITCHD_START([add-port br0 p0 -- set Interface p0 type=dummy-pmd], [], [], [DUMMY_NUMA])
CHECK_CPU_DISCOVERED()
CHECK_PMD_THREADS_CREATED()
AT_CHECK([ovs-vsctl set interface p0 options:n_rxq=8])
AT_CHECK([ovs-appctl dpif/show | sed 's/\(tx_queues=\)[[0-9]]*/\1<cleared>/g'], [0], [dnl
dummy@ovs-dummy: hit:0 missed:0
br0:
br0 65534/100: (dummy-internal)
p0 1/1: (dummy-pmd: configured_rx_queues=8, configured_tx_queues=<cleared>, requested_rx_queues=8, requested_tx_queues=<cleared>)
])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed SED_NUMA_CORE_PATTERN], [0], [dnl
pmd thread numa_id <cleared> core_id <cleared>:
isolated : false
port: p0 queue-id: 0 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 1 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 2 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 3 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 4 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 5 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 6 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 7 (enabled) pmd usage: NOT AVAIL
])
OVS_VSWITCHD_STOP
AT_CLEANUP
AT_SETUP([PMD - pmd-cpu-mask/distribution of rx queues])
OVS_VSWITCHD_START([add-port br0 p0 -- set Interface p0 type=dummy-pmd options:n_rxq=8],
[], [], [DUMMY_NUMA])
CHECK_CPU_DISCOVERED(2)
CHECK_PMD_THREADS_CREATED()
AT_CHECK([ovs-appctl dpif/show | sed 's/\(tx_queues=\)[[0-9]]*/\1<cleared>/g'], [0], [dnl
dummy@ovs-dummy: hit:0 missed:0
br0:
br0 65534/100: (dummy-internal)
p0 1/1: (dummy-pmd: configured_rx_queues=8, configured_tx_queues=<cleared>, requested_rx_queues=8, requested_tx_queues=<cleared>)
])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed SED_NUMA_CORE_PATTERN], [0], [dnl
pmd thread numa_id <cleared> core_id <cleared>:
isolated : false
port: p0 queue-id: 0 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 1 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 2 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 3 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 4 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 5 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 6 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 7 (enabled) pmd usage: NOT AVAIL
])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-rxq-assign=cycles])
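dnl pmd-rxq-assign=cycles sorts the rxqs by measured processing cycles and
dnl assigns them across the pmd threads, which with idle (equal-cycle) queues
dnl results in the zigzag distribution checked below.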
TMP=$(cat ovs-vswitchd.log | wc -l | tr -d [[:blank:]])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=0x3])
CHECK_PMD_THREADS_CREATED([2], [], [+$TMP])
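dnl The awk below joins each pmd thread's queue lines onto a single line so
dnl that parse_pmd_rxq_show_group can print all the queue ids polled by one
dnl core together.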
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | awk '/AVAIL$/ { printf("%s\t", $0); next } 1' | parse_pmd_rxq_show_group | sort], [0], [dnl
port: p0 queue-id: 0 3 4 7
port: p0 queue-id: 1 2 5 6
])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-rxq-assign=roundrobin])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | awk '/AVAIL$/ { printf("%s\t", $0); next } 1' | parse_pmd_rxq_show_group | sort], [0], [dnl
port: p0 queue-id: 0 2 4 6
port: p0 queue-id: 1 3 5 7
])
TMP=$(cat ovs-vswitchd.log | wc -l | tr -d [[:blank:]])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=0x1])
CHECK_PMD_THREADS_CREATED([1], [], [+$TMP])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed SED_NUMA_CORE_PATTERN], [0], [dnl
pmd thread numa_id <cleared> core_id <cleared>:
isolated : false
port: p0 queue-id: 0 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 1 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 2 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 3 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 4 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 5 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 6 (enabled) pmd usage: NOT AVAIL
port: p0 queue-id: 7 (enabled) pmd usage: NOT AVAIL
])
OVS_VSWITCHD_STOP
AT_CLEANUP
AT_SETUP([PMD - stats])
OVS_VSWITCHD_START([add-port br0 p0 -- set Interface p0 ofport_request=7 type=dummy-pmd options:n_rxq=4],
[], [], [DUMMY_NUMA])
CHECK_CPU_DISCOVERED()
CHECK_PMD_THREADS_CREATED()
AT_CHECK([ovs-appctl vlog/set dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 action=normal])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:emc-insert-inv-prob=1])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:smc-enable=true])
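dnl emc-insert-inv-prob=1 sets the inverse insertion probability to 1, i.e.
dnl every flow is inserted into the EMC; smc-enable=true enables the
dnl signature match cache.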
sleep 1
AT_CHECK([ovs-appctl dpif/show | sed 's/\(tx_queues=\)[[0-9]]*/\1<cleared>/g'], [0], [dnl
dummy@ovs-dummy: hit:0 missed:0
br0:
br0 65534/100: (dummy-internal)
p0 7/1: (dummy-pmd: configured_rx_queues=4, configured_tx_queues=<cleared>, requested_rx_queues=4, requested_tx_queues=<cleared>)
])
AT_CHECK([ovs-appctl dpif-netdev/pmd-stats-show | sed SED_NUMA_CORE_PATTERN | sed '/cycles/d' | grep pmd -A 9], [0], [dnl
pmd thread numa_id <cleared> core_id <cleared>:
packets received: 0
packet recirculations: 0
avg. datapath passes per packet: 0.00
emc hits: 0
smc hits: 0
megaflow hits: 0
avg. subtable lookups per megaflow hit: 0.00
miss with success upcall: 0
miss with failed upcall: 0
])
ovs-appctl time/stop
ovs-appctl time/warp 100
(
for i in `seq 0 19`;
do
pkt="in_port(7),eth(src=50:54:00:00:00:77,dst=50:54:00:00:01:78),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)"
AT_CHECK([ovs-appctl netdev-dummy/receive p0 $pkt])
done
)
ovs-appctl time/warp 100
AT_CHECK([grep -A 1 'miss upcall' ovs-vswitchd.log | tail -n 1], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:77,dst=50:54:00:00:01:78),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)
])
AT_CHECK([cat ovs-vswitchd.log | filter_flow_install | strip_xout], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:77,dst=50:54:00:00:01:78),eth_type(0x0800),ipv4(frag=no), actions: <del>
])
AT_CHECK([ovs-appctl dpif-netdev/pmd-stats-show | sed SED_NUMA_CORE_PATTERN | sed '/cycles/d' | grep pmd -A 9], [0], [dnl
pmd thread numa_id <cleared> core_id <cleared>:
packets received: 20
packet recirculations: 0
avg. datapath passes per packet: 1.00
emc hits: 19
smc hits: 0
megaflow hits: 0
avg. subtable lookups per megaflow hit: 0.00
miss with success upcall: 1
miss with failed upcall: 0
])
OVS_VSWITCHD_STOP
AT_CLEANUP
dnl Reconfigure the number of rx queues of a port, make sure that all the
dnl queues are polled by the datapath, and try to send a couple of packets.
AT_SETUP([PMD - reconfigure n_rxq])
OVS_VSWITCHD_START(
[add-port br0 p1 -- set Interface p1 type=dummy-pmd ofport_request=1 options:n_rxq=2 -- \
add-port br0 p2 -- set Interface p2 type=dummy-pmd ofport_request=2
], [], [], [--dummy-numa 0])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 action=controller])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 0
p1 1 0 0
p2 0 0 0
])
AT_CAPTURE_FILE([ofctl_monitor.log])
AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 --qid 1 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 2])
OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
AT_CHECK([cat ofctl_monitor.log], [0], [dnl
NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
])
AT_CHECK([ovs-vsctl set interface p1 options:n_rxq=4])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 0
p1 1 0 0
p1 2 0 0
p1 3 0 0
p2 0 0 0
])
AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 --qid 3 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 2])
OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
AT_CHECK([cat ofctl_monitor.log], [0], [dnl
NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
])
dnl Check resetting to default number of rx queues after removal from the db.
AT_CHECK([ovs-vsctl remove interface p1 options n_rxq])
AT_CHECK([ovs-appctl dpif/show | grep p1 | sed 's/\(tx_queues=\)[[0-9]]*/\1<cleared>/g'], [0], [dnl
p1 1/1: (dummy-pmd: configured_rx_queues=1, configured_tx_queues=<cleared>, requested_rx_queues=1, requested_tx_queues=<cleared>)
])
OVS_VSWITCHD_STOP
AT_CLEANUP
dnl There was a bug where OVS failed to create a ukey and install a megaflow
dnl if a packet with the exact same flow was received by two different pmd
dnl threads. This is a regression test for that bug.
AT_SETUP([PMD - same flow multiple threads])
OVS_VSWITCHD_START(
[add-port br0 p1 -- set Interface p1 type=dummy-pmd ofport_request=1 options:n_rxq=2 -- \
set Open_vSwitch . other_config:pmd-cpu-mask=3
], [], [], [--dummy-numa 0,0])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 action=controller])
dnl Make sure that the queues are on different cores. There's no way to
dnl control which queue is on which thread; we just need to make sure that
dnl two threads (core_id) show up in pmd-rxq-show.
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show | cut -f 4 -d ' ' | sort], [0], [dnl
0
1
])
AT_CAPTURE_FILE([ofctl_monitor.log])
AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 --qid 0 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 --qid 1 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 4])
OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
dnl Make sure that both flows have been installed
AT_CHECK([ovs-appctl dpctl/dump-flows | flow_dump_prepend_pmd], [0], [dnl
0 recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(frag=no), packets:0, bytes:0, used:never, actions:userspace(pid=0,controller(reason=1,dont_send=0,continuation=0,recirc_id=1,rule_cookie=0,controller_id=0,max_len=65535))
1 recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(frag=no), packets:0, bytes:0, used:never, actions:userspace(pid=0,controller(reason=1,dont_send=0,continuation=0,recirc_id=1,rule_cookie=0,controller_id=0,max_len=65535))
])
AT_CHECK([cat ofctl_monitor.log], [0], [dnl
NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
])
OVS_VSWITCHD_STOP
AT_CLEANUP
AT_SETUP([PMD - change numa node])
OVS_VSWITCHD_START(
[add-port br0 p1 -- set Interface p1 type=dummy-pmd ofport_request=1 options:n_rxq=2 -- \
add-port br0 p2 -- set Interface p2 type=dummy-pmd ofport_request=2 options:n_rxq=2 -- \
set Open_vSwitch . other_config:pmd-cpu-mask=3
], [], [], [--dummy-numa 0,1])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 action=controller])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 0
p1 1 0 0
p2 0 0 0
p2 1 0 0
])
AT_CAPTURE_FILE([ofctl_monitor.log])
AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 --qid 0 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 2])
OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
AT_CHECK([cat ofctl_monitor.log], [0], [dnl
NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
])
AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
AT_CHECK([ovs-appctl netdev-dummy/receive p2 --qid 1 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 2])
OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
AT_CHECK([cat ofctl_monitor.log], [0], [dnl
NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=2 (via action) data_len=106 (unbuffered)
icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
])
AT_CHECK([ovs-vsctl set Interface p2 options:numa_id=1])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 0
p1 1 0 0
p2 0 1 1
p2 1 1 1
])
AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 --qid 1 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 2])
OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
AT_CHECK([cat ofctl_monitor.log], [0], [dnl
NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
])
AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
AT_CHECK([ovs-appctl netdev-dummy/receive p2 --qid 0 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 2])
OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
AT_CHECK([cat ofctl_monitor.log], [0], [dnl
NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=2 (via action) data_len=106 (unbuffered)
icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
])
OVS_VSWITCHD_STOP
AT_CLEANUP
AT_SETUP([PMD - non pmd device])
OVS_VSWITCHD_START(
[add-port br0 p1 -- set Interface p1 type=dummy-pmd ofport_request=1 options:n_rxq=1 -- \
add-port br0 p2 -- set Interface p2 type=dummy ofport_request=2 -- \
set Interface br0 options:tx_pcap=br0.pcap -- \
set Open_vSwitch . other_config:pmd-cpu-mask=1
], [], [], [--dummy-numa 0,0])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 actions=LOCAL])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 0
])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 --qid 0 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
AT_CHECK([ovs-appctl netdev-dummy/receive p2 --qid 0 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `ovs-pcap br0.pcap | wc -l` -ge 2])
AT_CHECK([ovs-pcap br0.pcap], [0], [dnl
50540000000a50540000000908004500005c000000004001669f0a0000020a000001080013fc00000000000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f
50540000000a50540000000908004500005c000000004001669f0a0000020a000001080013fc00000000000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f
])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=2])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 1
])
AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 --qid 0 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
AT_CHECK([ovs-appctl netdev-dummy/receive p2 --qid 0 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `ovs-pcap br0.pcap | wc -l` -ge 4])
AT_CHECK([ovs-pcap br0.pcap], [0], [dnl
50540000000a50540000000908004500005c000000004001669f0a0000020a000001080013fc00000000000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f
50540000000a50540000000908004500005c000000004001669f0a0000020a000001080013fc00000000000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f
50540000000a50540000000908004500005c000000004001669f0a0000020a000001080013fc00000000000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f
50540000000a50540000000908004500005c000000004001669f0a0000020a000001080013fc00000000000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f
])
OVS_VSWITCHD_STOP
AT_CLEANUP
AT_SETUP([PMD - add remove ports])
OVS_VSWITCHD_START(
[], [], [], [--dummy-numa 0,0])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 actions=controller])
AT_CHECK([ovs-vsctl add-port br0 p1 -- set Interface p1 type=dummy-pmd ofport_request=1 options:n_rxq=1])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 0
])
AT_CAPTURE_FILE([ofctl_monitor.log])
AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 --qid 0 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 2])
OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
AT_CHECK([cat ofctl_monitor.log], [0], [dnl
NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
])
AT_CHECK([ovs-vsctl del-port br0 p1])
AT_CHECK([ovs-vsctl add-port br0 p1 -- set Interface p1 type=dummy-pmd ofport_request=1 options:n_rxq=1])
AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 --qid 0 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 2])
OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
AT_CHECK([cat ofctl_monitor.log], [0], [dnl
NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered)
icmp,vlan_tci=0x0000,dl_src=50:54:00:00:00:09,dl_dst=50:54:00:00:00:0a,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,icmp_type=8,icmp_code=0 icmp_csum:13fc
])
OVS_VSWITCHD_STOP
AT_CLEANUP
AT_SETUP([PMD - rxq affinity])
OVS_VSWITCHD_START(
[], [], [], [--dummy-numa 0,0,0,0,0,0,0,0,0])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 actions=controller])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=0x1fe])
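dnl 0x1fe enables pmd threads on cores 1-8.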
AT_CHECK([ovs-vsctl add-port br0 p1 -- set Interface p1 type=dummy-pmd ofport_request=1 options:n_rxq=4 other_config:pmd-rxq-affinity="0:3,1:7,2:2,3:8"])
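dnl pmd-rxq-affinity is a comma-separated list of <queue-id>:<core-id> pairs.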
dnl The rxqs should be on the requested cores.
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 3
p1 1 0 7
p1 2 0 2
p1 3 0 8
])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=6])
dnl We removed the cores requested by some queues from pmd-cpu-mask.
dnl Those queues will not be polled.
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 2 0 2
])
AT_CHECK([ovs-vsctl remove Interface p1 other_config pmd-rxq-affinity])
dnl We removed the rxq-affinity request. dpif-netdev should assign queues
dnl in a round-robin fashion. We just make sure that every rxq is being
dnl polled again.
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show | cut -f 1,2 -d ' ' | sort], [0], [dnl
p1 0
p1 1
p1 2
p1 3
])
AT_CHECK([ovs-vsctl set Interface p1 other_config:pmd-rxq-affinity='0:1'])
dnl We explicitly requested core 1 for queue 0. Core 1 becomes isolated and
dnl every other queue goes to core 2.
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 1
p1 1 0 2
p1 2 0 2
p1 3 0 2
])
OVS_VSWITCHD_STOP(["/dpif_netdev|WARN|There is no PMD thread on core/d"])
AT_CLEANUP
AT_SETUP([PMD - rxq affinity - NUMA])
OVS_VSWITCHD_START(
[], [], [], [--dummy-numa 0,0,0,1,1])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 actions=controller])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=7e])
AT_CHECK([ovs-vsctl add-port br0 p1 -- set Interface p1 type=dummy-pmd ofport_request=1 options:n_rxq=2 options:numa_id=0 other_config:pmd-rxq-affinity="0:1,1:2"])
dnl The rxqs should be on the requested cores.
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 1
p1 1 0 2
])
AT_CHECK([ovs-vsctl set Interface p1 other_config:pmd-rxq-affinity="0:3,1:4"])
dnl We moved the queues to different numa node. Expecting threads on
dnl NUMA node 1 to be created.
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 1 3
p1 1 1 4
])
AT_CHECK([ovs-vsctl set Interface p1 other_config:pmd-rxq-affinity="0:3,1:1"])
dnl Queues are split between NUMA nodes.
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 1 3
p1 1 0 1
])
AT_CHECK([ovs-vsctl remove Interface p1 other_config pmd-rxq-affinity])
dnl We removed the rxq-affinity request. dpif-netdev should assign queues
dnl in a round-robin fashion. We just make sure that every rxq is being
dnl polled again.
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show | cut -f 1,2 -d ' ' | sort], [0], [dnl
p1 0
p1 1
])
AT_CHECK([ovs-vsctl set Interface p1 other_config:pmd-rxq-affinity='0:3'])
dnl We explicitly request NUMA node 1 for queue 0.
dnl Queue 1 should be polled by a thread from NUMA node 0.
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show | cut -f 1,2,3 -d ' '], [0], [dnl
p1 0 1
p1 1 0
])
OVS_VSWITCHD_STOP
AT_CLEANUP
AT_SETUP([PMD - monitor threads])
OVS_VSWITCHD_START(
[], [], [], [--dummy-numa 0,0])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
dnl The two devices are connected together externally using net.sock
AT_CHECK([ovs-vsctl add-port br0 p1 -- set Interface p1 type=dummy-pmd ofport_request=1 options:n_rxq=1 options:pstream=punix:$OVS_RUNDIR/net.sock])
AT_CHECK([ovs-vsctl add-port br0 p2 -- set Interface p2 type=dummy-pmd ofport_request=2 options:n_rxq=1 options:stream=unix:$OVS_RUNDIR/net.sock])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 0
p2 0 0 0
])
dnl Enable bfd with a very aggressive interval. This will make the monitor very
dnl busy, and uncover race conditions with the main thread.
AT_CHECK([ovs-vsctl set Interface p1 bfd:enable=true bfd:min_rx=1 bfd:min_tx=1])
AT_CHECK([ovs-vsctl set Interface p2 bfd:enable=true bfd:min_rx=1 bfd:min_tx=1])
AT_CHECK([ovs-vsctl --timeout=10 wait-until Interface p1 bfd_status:forwarding=true \
-- wait-until Interface p2 bfd_status:forwarding=true])
dnl Trigger reconfiguration of the datapath
AT_CHECK([ovs-vsctl set Interface p1 options:n_rxq=2])
AT_CHECK([ovs-vsctl set Interface p2 options:n_rxq=2])
dnl Make sure that the reconfiguration succeeded.
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
p1 0 0 0
p1 1 0 0
p2 0 0 0
p2 1 0 0
])
OVS_VSWITCHD_STOP
AT_CLEANUP
AT_SETUP([PMD - dpctl])
OVS_VSWITCHD_START(
[del-br br0], [], [], [--dummy-numa 0,0])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-appctl dpctl/add-dp dummy@dp0])
AT_CHECK([ovs-appctl dpctl/add-if dummy@dp0 p1,type=dummy-pmd])
AT_CHECK([ovs-appctl dpctl/add-if dummy@dp0 p2,type=dummy])
AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show dp0 | parse_pmd_rxq_show], [0], [dnl
p1 0 0 0
])
AT_CHECK([ovs-appctl dpctl/show dummy@dp0], [0], [dnl
dummy@dp0:
lookups: hit:0 missed:0 lost:0
flows: 0
port 0: dp0 (dummy-internal)
port 1: p1 (dummy-pmd: configured_rx_queues=1, configured_tx_queues=1, requested_rx_queues=1, requested_tx_queues=1)
port 2: p2 (dummy)
])
AT_CHECK([ovs-appctl dpctl/add-flow dummy@dp0 'in_port(1),eth(src=00:00:00:00:00:01,dst=00:00:00:00:00:02),eth_type(0x1234)' 2], [0], [dnl
])
AT_CHECK([ovs-appctl dpctl/dump-flows dummy@dp0 | sort], [0], [dnl
flow-dump from non-dpdk interfaces:
flow-dump from pmd on cpu core: 0
recirc_id(0),in_port(1),eth(src=00:00:00:00:00:01,dst=00:00:00:00:00:02),eth_type(0x1234), packets:0, bytes:0, used:never, actions:2
recirc_id(0),in_port(1),eth(src=00:00:00:00:00:01,dst=00:00:00:00:00:02),eth_type(0x1234), packets:0, bytes:0, used:never, actions:2
])
AT_CHECK([ovs-appctl dpctl/del-flow dummy@dp0 'in_port(1),eth(src=00:00:00:00:00:01,dst=00:00:00:00:00:02),eth_type(0x1234)'], [0], [dnl
])
AT_CHECK([ovs-appctl dpctl/dump-flows dummy@dp0], [0], [dnl
])
AT_CHECK([ovs-appctl dpctl/del-dp dummy@dp0], [0], [dnl
])
OVS_VSWITCHD_STOP
AT_CLEANUP