AT_BANNER([dpif-netdev])
m4_divert_push([PREPARE_TESTS])
[
# Strips out uninteresting parts of flow output, as well as parts
# that vary from one run to another (e.g., timing and bond actions).
strip_timers () {
sed '
s/duration:[0-9]*\.[0-9]*/duration:0.0/
s/used:[0-9]*\.[0-9]*/used:0.0/
'
}
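# strip_xout normalizes flow dump output for comparison: it removes UFIDs,
# zeroes timing and traffic statistics, elides the actions, and sorts the
# result.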
strip_xout () {
sed '
s/mega_ufid:[-0-9a-f]* //
s/ufid:[-0-9a-f]* //
s/used:[0-9]*\.[0-9]*/used:0.0/
s/actions:.*/actions: <del>/
s/packets:[0-9]*/packets:0/
s/bytes:[0-9]*/bytes:0/
' | sort
}
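# strip_xout_keep_actions does the same, except that the actions are kept
# so they can be checked too.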
strip_xout_keep_actions () {
sed '
s/mega_ufid:[-0-9a-f]* //
s/ufid:[-0-9a-f]* //
s/used:[0-9]*\.[0-9]*/used:0.0/
s/packets:[0-9]*/packets:0/
s/bytes:[0-9]*/bytes:0/
' | sort
}
filter_flow_install () {
grep 'flow_add' | sed 's/.*flow_add: //' | sort | uniq
}
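# The filter_hw_* helpers below extract the netdev-dummy debug messages
# that describe hardware offload activity: flow installation, flow
# deletion, and packets matched against an offloaded flow via their mark.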
filter_hw_flow_install () {
grep 'netdev_dummy.*flow put\[create\]' | sed 's/.*|DBG|//' | sort | uniq
}
filter_hw_flow_del () {
grep 'netdev_dummy.*flow del' | sed 's/.*|DBG|//' | sort | uniq
}
filter_hw_packet_netdev_dummy () {
grep 'netdev_dummy.*: packet:.*with mark' | sed 's/.*|DBG|//' | sort | uniq
}
filter_flow_dump () {
grep 'flow_dump ' | sed '
s/.*flow_dump //
s/used:[0-9]*\.[0-9]*/used:0.0/
' | sort | uniq
}
strip_metadata () {
sed 's/metadata=0x[0-9a-f]*/metadata=0x0/'
}
]
m4_divert_pop([PREPARE_TESTS])
AT_SETUP([dpif-netdev - netdev-dummy/receive])
# Create br0 with interface p1
OVS_VSWITCHD_START([add-port br0 p1 -- set interface p1 type=dummy ofport_request=1 -- ])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 action=normal])
ovs-appctl time/stop
ovs-appctl time/warp 5000
AT_CHECK([ovs-appctl netdev-dummy/receive p1 'in_port(1),eth(src=50:54:00:00:00:01,dst=50:54:00:00:02:00),eth_type(0x0800),ipv4(src=10.0.0.1,dst=10.0.0.2,proto=6,tos=0,ttl=64,frag=no),tcp(src=8,dst=9),tcp_flags(ack)'])
OVS_WAIT_UNTIL([grep "miss upcall" ovs-vswitchd.log])
AT_CHECK([grep -A 1 'miss upcall' ovs-vswitchd.log | tail -n 1], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:01,dst=50:54:00:00:02:00),eth_type(0x0800),ipv4(src=10.0.0.1,dst=10.0.0.2,proto=6,tos=0,ttl=64,frag=no),tcp(src=8,dst=9),tcp_flags(ack)
])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 'in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:06:00),eth_type(0x0800),ipv4(src=10.0.0.5,dst=10.0.0.6,proto=6,tos=0,ttl=64,frag=no),tcp(src=8,dst=9),tcp_flags(ack)' --len 1024])
OVS_WAIT_UNTIL([test `grep -c "miss upcall" ovs-vswitchd.log` -ge 2])
AT_CHECK([grep -A 1 'miss upcall' ovs-vswitchd.log | tail -n 1], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:05,dst=50:54:00:00:06:00),eth_type(0x0800),ipv4(src=10.0.0.5,dst=10.0.0.6,proto=6,tos=0,ttl=64,frag=no),tcp(src=8,dst=9),tcp_flags(ack)
])
OVS_VSWITCHD_STOP
AT_CLEANUP
m4_define([DPIF_NETDEV_DUMMY_IFACE],
[AT_SETUP([dpif-netdev - $1 interface])
# Create br0 with interfaces p1 and p7
# and br1 with interfaces p2 and p8
# with p1 and p2 connected via unix domain socket
OVS_VSWITCHD_START(
[add-port br0 p1 -- set interface p1 type=$1 options:pstream=punix:$OVS_RUNDIR/p0.sock ofport_request=1 -- \
add-port br0 p7 -- set interface p7 ofport_request=7 type=$1 -- \
add-br br1 -- \
set bridge br1 other-config:hwaddr=aa:66:aa:66:00:00 -- \
set bridge br1 datapath-type=dummy other-config:datapath-id=1234 \
fail-mode=secure -- \
add-port br1 p2 -- set interface p2 type=$1 options:stream=unix:$OVS_RUNDIR/p0.sock ofport_request=2 -- \
add-port br1 p8 -- set interface p8 ofport_request=8 type=$1 --], [], [],
[m4_if([$1], [dummy-pmd], [--dummy-numa="0,0,0,0,8,8,8,8"], [])])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 action=normal])
AT_CHECK([ovs-ofctl add-flow br1 action=normal])
ovs-appctl time/stop
ovs-appctl time/warp 5000
AT_CHECK([ovs-appctl netdev-dummy/receive p7 'in_port(7),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
AT_CHECK([ovs-appctl netdev-dummy/receive p8 'in_port(8),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:0b,dst=50:54:00:00:00:0c),eth_type(0x0800),ipv4(src=10.0.0.3,dst=10.0.0.4,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
ovs-appctl time/warp 100
sleep 1 # wait for forwarders to process packets
AT_CHECK([filter_flow_install < ovs-vswitchd.log | strip_xout], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:0b,dst=50:54:00:00:00:0c),eth_type(0x0800),ipv4(frag=no), actions: <del>
recirc_id(0),in_port(2),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(frag=no), actions: <del>
recirc_id(0),in_port(7),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(frag=no), actions: <del>
recirc_id(0),in_port(8),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:0b,dst=50:54:00:00:00:0c),eth_type(0x0800),ipv4(frag=no), actions: <del>
])
OVS_VSWITCHD_STOP
AT_CLEANUP])
DPIF_NETDEV_DUMMY_IFACE([dummy])
DPIF_NETDEV_DUMMY_IFACE([dummy-pmd])
m4_define([DPIF_NETDEV_MISS_FLOW_INSTALL],
[AT_SETUP([dpif-netdev - miss upcall key matches flow_install - $1])
OVS_VSWITCHD_START(
[add-port br0 p1 \
-- set interface p1 type=$1 options:pstream=punix:$OVS_RUNDIR/p0.sock \
-- set bridge br0 datapath-type=dummy \
other-config:datapath-id=1234 fail-mode=secure], [], [],
[m4_if([$1], [dummy-pmd], [--dummy-numa="0,0,0,0,1,1,1,1"], [])])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 action=normal])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 'in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
ovs-appctl ofproto/trace 'in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'
OVS_WAIT_UNTIL([grep "miss upcall" ovs-vswitchd.log])
AT_CHECK([grep -A 1 'miss upcall' ovs-vswitchd.log | tail -n 1], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)
])
AT_CHECK([filter_flow_install < ovs-vswitchd.log | strip_xout], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(frag=no), actions: <del>
])
# Now, the same again without megaflows.
AT_CHECK([ovs-appctl upcall/disable-megaflows], [0], [megaflows disabled
])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 'in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `grep -c "miss upcall" ovs-vswitchd.log` -ge 2])
AT_CHECK([grep -A 1 'miss upcall' ovs-vswitchd.log | tail -n 1], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)
])
AT_CHECK([filter_flow_install < ovs-vswitchd.log | strip_xout], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(frag=no), actions: <del>
skb_priority(0),skb_mark(0),ct_state(-new-est-rel-rpl-inv-trk-snat-dnat),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), actions: <del>
])
OVS_VSWITCHD_STOP
AT_CLEANUP])
DPIF_NETDEV_MISS_FLOW_INSTALL([dummy])
DPIF_NETDEV_MISS_FLOW_INSTALL([dummy-pmd])
m4_define([DPIF_NETDEV_FLOW_PUT_MODIFY],
[AT_SETUP([dpif-netdev - datapath flow modification - $1])
OVS_VSWITCHD_START(
[add-port br0 p1 -- set interface p1 type=$1 ofport_request=1 options:pstream=punix:$OVS_RUNDIR/p1.sock -- \
add-port br0 p2 -- set interface p2 type=$1 ofport_request=2 options:pstream=punix:$OVS_RUNDIR/p2.sock -- \
set bridge br0 datapath-type=dummy \
other-config:datapath-id=1234 fail-mode=secure], [], [],
[m4_if([$1], [dummy-pmd], [--dummy-numa="0,0,0,0,1,1,1,1"], [])])
AT_CHECK([ovs-appctl vlog/set dpif:file:dbg dpif_netdev:file:dbg])
# Add a flow that directs some packets received on p1 to p2 and the
# rest back out p1.
AT_CHECK([ovs-ofctl del-flows br0])
AT_CHECK([ovs-ofctl add-flow br0 priority=1,ip,in_port=1,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,actions=output:2])
AT_CHECK([ovs-ofctl add-flow br0 priority=0,in_port=1,actions=IN_PORT])
# Inject a packet of the form that should go to p2.
packet="in_port(1),packet_type(ns=0,id=0),eth(src=00:06:07:08:09:0a,dst=00:01:02:03:04:05),eth_type(0x8100),vlan(vid=1000,pcp=5),encap(eth_type(0x0800),ipv4(src=127.0.0.1,dst=127.0.0.1,proto=0,tos=0,ttl=64,frag=no))"
AT_CHECK([ovs-appctl netdev-dummy/receive p1 $packet --len 64], [0])
OVS_WAIT_UNTIL([grep "miss upcall" ovs-vswitchd.log])
AT_CHECK([grep -A 1 'miss upcall' ovs-vswitchd.log | tail -n 1], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),packet_type(ns=0,id=0),eth(src=00:06:07:08:09:0a,dst=00:01:02:03:04:05),eth_type(0x8100),vlan(vid=1000,pcp=5),encap(eth_type(0x0800),ipv4(src=127.0.0.1,dst=127.0.0.1,proto=0,tos=0,ttl=64,frag=no))
])
ovs-appctl revalidator/wait
# Dump the datapath flow to see that it goes to p2 ("actions:2").
AT_CHECK([ovs-appctl dpif/dump-flows br0], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth(src=00:06:07:08:09:0a,dst=00:01:02:03:04:05),eth_type(0x8100),vlan(vid=1000,pcp=5),encap(eth_type(0x0800),ipv4(frag=no)), packets:0, bytes:0, used:never, actions:2
])
# Delete the flows, then add new flows that would not match the same
# packet as before.
AT_CHECK([ovs-ofctl del-flows br0])
AT_CHECK([ovs-ofctl add-flow br0 priority=1,in_port=1,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,dl_type=0x0801,actions=output:2])
AT_CHECK([ovs-ofctl add-flow br0 priority=0,in_port=1,actions=IN_PORT])
# Wait for flow revalidation
ovs-appctl revalidator/wait
# Inject the same packet again.
AT_CHECK([ovs-appctl netdev-dummy/receive p1 $packet --len 64])
ovs-appctl revalidator/wait
# Dump the datapath flow to see that it goes to p1 ("actions:IN_PORT").
AT_CHECK([ovs-appctl dpif/dump-flows br0 | strip_timers], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth(src=00:06:07:08:09:0a,dst=00:01:02:03:04:05),eth_type(0x8100),vlan(vid=1000,pcp=5),encap(eth_type(0x0800),ipv4(frag=no)), packets:1, bytes:64, used:0.0s, actions:1
])
OVS_VSWITCHD_STOP
AT_CLEANUP])
DPIF_NETDEV_FLOW_PUT_MODIFY([dummy])
DPIF_NETDEV_FLOW_PUT_MODIFY([dummy-pmd])
m4_define([DPIF_NETDEV_MISS_FLOW_DUMP],
[AT_SETUP([dpif-netdev - miss upcall key matches flow_dump - $1])
OVS_VSWITCHD_START(
[add-port br0 p1 \
-- set interface p1 type=$1 options:pstream=punix:$OVS_RUNDIR/p0.sock \
-- set bridge br0 datapath-type=dummy \
other-config:datapath-id=1234 fail-mode=secure], [], [],
[m4_if([$1], [dummy-pmd], [--dummy-numa="0,0,0,0,1,1,1,1"], [])])
AT_CHECK([ovs-appctl upcall/disable-ufid], [0], [Datapath dumping tersely using UFID disabled
], [])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl add-flow br0 action=normal])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 'in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([grep "miss upcall" ovs-vswitchd.log])
AT_CHECK([grep -A 1 'miss upcall' ovs-vswitchd.log | tail -n 1], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)
])
ovs-appctl revalidator/wait
AT_CHECK([filter_flow_dump < ovs-vswitchd.log | strip_xout], [0], [dnl
skb_priority(0/0),skb_mark(0/0),ct_state(0/0),ct_zone(0/0),ct_mark(0/0),ct_label(0/0),recirc_id(0),dp_hash(0/0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2/0.0.0.0,dst=10.0.0.1/0.0.0.0,proto=1/0,tos=0/0,ttl=64/0,frag=no),icmp(type=8/0,code=0/0), packets:0, bytes:0, used:never, actions: <del>
])
# Now, the same again without megaflows.
AT_CHECK([ovs-appctl upcall/disable-megaflows], [0], [megaflows disabled
])
AT_CHECK([ovs-appctl upcall/disable-ufid], [0], [Datapath dumping tersely using UFID disabled
], [])
AT_CHECK([ovs-appctl netdev-dummy/receive p1 'in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)'])
OVS_WAIT_UNTIL([test `grep -c "miss upcall" ovs-vswitchd.log` -ge 2])
AT_CHECK([grep -A 1 'miss upcall' ovs-vswitchd.log | tail -n 1], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)
])
ovs-appctl revalidator/wait
AT_CHECK([filter_flow_dump < ovs-vswitchd.log | strip_xout], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0/0xff),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0), packets:0, bytes:0, used:never, actions: <del>
skb_priority(0/0),skb_mark(0/0),ct_state(0/0),ct_zone(0/0),ct_mark(0/0),ct_label(0/0),recirc_id(0),dp_hash(0/0),in_port(1),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2/0.0.0.0,dst=10.0.0.1/0.0.0.0,proto=1/0,tos=0/0,ttl=64/0,frag=no),icmp(type=8/0,code=0/0), packets:0, bytes:0, used:never, actions: <del>
])
OVS_VSWITCHD_STOP
AT_CLEANUP])
DPIF_NETDEV_MISS_FLOW_DUMP([dummy])
DPIF_NETDEV_MISS_FLOW_DUMP([dummy-pmd])
AT_SETUP([dpif-netdev - meters])
# Create br0 with interfaces p1 and p7
# and br1 with interfaces p2 and p8
# with p1 and p2 connected via unix domain socket
OVS_VSWITCHD_START(
[add-port br0 p1 -- set interface p1 type=dummy options:pstream=punix:$OVS_RUNDIR/p0.sock ofport_request=1 -- \
add-port br0 p7 -- set interface p7 ofport_request=7 type=dummy -- \
add-br br1 -- \
set bridge br1 other-config:hwaddr=aa:66:aa:66:00:00 -- \
set bridge br1 datapath-type=dummy other-config:datapath-id=1234 \
fail-mode=secure -- \
add-port br1 p2 -- set interface p2 type=dummy options:stream=unix:$OVS_RUNDIR/p0.sock ofport_request=2 -- \
add-port br1 p8 -- set interface p8 ofport_request=8 type=dummy --])
AT_CHECK([ovs-appctl vlog/set dpif:dbg dpif_netdev:dbg])
AT_CHECK([ovs-ofctl -O OpenFlow13 add-meter br0 'meter=1 pktps burst stats bands=type=drop rate=1 burst_size=1'])
AT_CHECK([ovs-ofctl -O OpenFlow13 add-meter br0 'meter=2 kbps burst stats bands=type=drop rate=1 burst_size=2'])
AT_CHECK([ovs-ofctl -O OpenFlow13 add-flow br0 'in_port=1 action=meter:1,7'])
AT_CHECK([ovs-ofctl -O OpenFlow13 add-flow br0 'in_port=7 action=meter:2,1'])
AT_CHECK([ovs-ofctl add-flow br1 'in_port=2 action=8'])
AT_CHECK([ovs-ofctl add-flow br1 'in_port=8 action=2'])
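# Packets injected on p7 hit meter 2 directly on br0.  Packets injected
# on p8 are forwarded by br1 to p2, arrive on br0's p1 via the unix
# socket, and hit meter 1.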
ovs-appctl time/stop
AT_CHECK([ovs-ofctl -O OpenFlow13 dump-meters br0], [0], [dnl
OFPST_METER_CONFIG reply (OF1.3) (xid=0x2):
meter=1 pktps burst stats bands=
type=drop rate=1 burst_size=1
meter=2 kbps burst stats bands=
type=drop rate=1 burst_size=2
])
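# Both meters are expected to behave as classic token buckets.  A rough
# sketch of the assumed model (not the actual implementation):
#   tokens = MIN(burst, tokens + rate * elapsed);
#   if (cost <= tokens) { tokens -= cost; pass; } else { drop; }
# where a packet costs 1 token for pktps meters and its size in bits for
# kbps meters (with rate and burst scaled by 1000).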
ovs-appctl time/warp 5000
for i in `seq 1 7`; do
AT_CHECK(
[ovs-appctl netdev-dummy/receive p7 \
'in_port(7),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' --len 60])
done
for i in `seq 1 5`; do
AT_CHECK(
[ovs-appctl netdev-dummy/receive p8 \
'in_port(8),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:0b,dst=50:54:00:00:00:0c),eth_type(0x0800),ipv4(src=10.0.0.3,dst=10.0.0.4,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' --len 60])
done
sleep 1 # wait for forwarders to process packets
# Meter 1 is measuring packets, allowing one packet per second with
# bursts of one packet, so 4 out of 5 packets should hit the drop band.
# Meter 2 is measuring kbps, with burst size 2 (== 2000 bits). 4 packets
# (240 bytes == 1920 bits) pass, but the last three packets should hit the
# drop band. There should be 80 bits remaining for the next packets.
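# (Each 60-byte packet costs 480 bits, so the 2000-bit bucket admits 4
# packets and keeps 2000 - 4 * 480 = 80 bits.)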
AT_CHECK([ovs-ofctl -O OpenFlow13 meter-stats br0 | strip_timers], [0], [dnl
OFPST_METER reply (OF1.3) (xid=0x2):
meter:1 flow_count:1 packet_in_count:5 byte_in_count:300 duration:0.0s bands:
0: packet_count:4 byte_count:240
meter:2 flow_count:1 packet_in_count:7 byte_in_count:420 duration:0.0s bands:
0: packet_count:3 byte_count:180
])
# Advance time by 870 ms
ovs-appctl time/warp 870
for i in `seq 1 5`; do
AT_CHECK(
[ovs-appctl netdev-dummy/receive p7 \
'in_port(7),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' --len 60])
AT_CHECK(
[ovs-appctl netdev-dummy/receive p8 \
'in_port(8),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:0b,dst=50:54:00:00:00:0c),eth_type(0x0800),ipv4(src=10.0.0.3,dst=10.0.0.4,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' --len 60])
done
sleep 1 # wait for forwarders to process packets
# Meter 1 is measuring packets, allowing one packet per second with
# bursts of one packet, so all 5 of the new packets should hit the drop
# band.
# Meter 2 is measuring kbps, with burst size 2 (== 2000 bits). After 870ms
# there should be space for 80 + 870 = 950 bits, so one new 60 byte (480 bit)
# packet should pass, remaining 4 should hit the drop band. There should be
# 470 bits left.
AT_CHECK([ovs-ofctl -O OpenFlow13 meter-stats br0 | strip_timers], [0], [dnl
OFPST_METER reply (OF1.3) (xid=0x2):
meter:1 flow_count:1 packet_in_count:10 byte_in_count:600 duration:0.0s bands:
0: packet_count:9 byte_count:540
meter:2 flow_count:1 packet_in_count:12 byte_in_count:720 duration:0.0s bands:
0: packet_count:7 byte_count:420
])
# Advance time by 10 ms
ovs-appctl time/warp 10
for i in `seq 1 5`; do
AT_CHECK(
[ovs-appctl netdev-dummy/receive p7 \
'in_port(7),packet_type(ns=0,id=0),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' --len 60])
done
sleep 1 # wait for forwarders to process packets
# Meter 1 should remain the same as we didn't send anything that should hit it.
# Meter 2 is measuring kbps, with burst size 2 (== 2000 bits). After 10ms
# there should be space for 470 + 10 = 480 bits, so one new 60 byte (480 bit)
# packet should pass, remaining 4 should hit the drop band.
AT_CHECK([ovs-ofctl -O OpenFlow13 meter-stats br0 | strip_timers], [0], [dnl
OFPST_METER reply (OF1.3) (xid=0x2):
meter:1 flow_count:1 packet_in_count:10 byte_in_count:600 duration:0.0s bands:
0: packet_count:9 byte_count:540
meter:2 flow_count:1 packet_in_count:17 byte_in_count:1020 duration:0.0s bands:
0: packet_count:11 byte_count:660
])
ovs-appctl time/warp 5000
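# The drop counter should account for every banded drop so far:
# 9 packets from meter 1 plus 11 packets from meter 2.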
AT_CHECK([
ovs-appctl coverage/read-counter datapath_drop_meter
], [0], [dnl
20
])
AT_CHECK([cat ovs-vswitchd.log | filter_flow_install | strip_xout_keep_actions], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(frag=no), actions:meter(0),7
recirc_id(0),in_port(2),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(frag=no), actions:8
recirc_id(0),in_port(7),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(frag=no), actions:meter(1),1
recirc_id(0),in_port(8),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(frag=no), actions:2
])
AT_CHECK([ovs-ofctl -O OpenFlow13 del-meters br0])
OVS_VSWITCHD_STOP
AT_CLEANUP
m4_define([DPIF_NETDEV_FLOW_HW_OFFLOAD],
[AT_SETUP([dpif-netdev - partial hw offload - $1])
OVS_VSWITCHD_START(
[add-port br0 p1 -- \
set interface p1 type=$1 ofport_request=1 options:pstream=punix:$OVS_RUNDIR/p1.sock options:ifindex=1100 -- \
set bridge br0 datapath-type=dummy \
other-config:datapath-id=1234 fail-mode=secure], [], [],
[m4_if([$1], [dummy-pmd], [--dummy-numa="0,0,0,0,1,1,1,1"], [])])
AT_CHECK([ovs-appctl vlog/set dpif:file:dbg dpif_netdev:file:dbg netdev_dummy:file:dbg])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:hw-offload=true])
OVS_WAIT_UNTIL([grep "netdev: Flow API Enabled" ovs-vswitchd.log])
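# With partial offload, the dummy netdev only matches packets and tags
# them with a flow mark; the datapath still executes the actions for
# packets carrying the mark.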
AT_CHECK([ovs-ofctl del-flows br0])
AT_CHECK([ovs-ofctl add-flow br0 in_port=1,actions=IN_PORT])
packet="packet_type(ns=0,id=0),eth(src=00:06:07:08:09:0a,dst=00:01:02:03:04:05),eth_type(0x0800),ipv4(src=127.0.0.1,dst=127.0.0.1,proto=0,tos=0,ttl=64,frag=no)"
AT_CHECK([ovs-appctl netdev-dummy/receive p1 $packet --len 64], [0])
OVS_WAIT_UNTIL([grep "miss upcall" ovs-vswitchd.log])
AT_CHECK([grep -A 1 'miss upcall' ovs-vswitchd.log | tail -n 1], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),packet_type(ns=0,id=0),eth(src=00:06:07:08:09:0a,dst=00:01:02:03:04:05),eth_type(0x0800),ipv4(src=127.0.0.1,dst=127.0.0.1,proto=0,tos=0,ttl=64,frag=no)
])
# Check that the flow was successfully offloaded.
OVS_WAIT_UNTIL([grep "succeed to add netdev flow" ovs-vswitchd.log])
AT_CHECK([filter_hw_flow_install < ovs-vswitchd.log | strip_xout], [0], [dnl
p1: flow put[[create]]: flow match: recirc_id=0,eth,ip,in_port=1,vlan_tci=0x0000,nw_frag=no, mark: 1
])
# Check that the datapath flow was installed successfully.
AT_CHECK([filter_flow_install < ovs-vswitchd.log | strip_xout], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(frag=no), actions: <del>
])
# Inject the same packet again.
AT_CHECK([ovs-appctl netdev-dummy/receive p1 $packet --len 64], [0])
# Check for successful packet matching with the installed offloaded flow.
AT_CHECK([filter_hw_packet_netdev_dummy < ovs-vswitchd.log | strip_xout], [0], [dnl
p1: packet: ip,vlan_tci=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_proto=0,nw_tos=0,nw_ecn=0,nw_ttl=64 matches with flow: recirc_id=0,eth,ip,vlan_tci=0x0000,nw_frag=no with mark: 1
])
ovs-appctl revalidator/wait
# Dump the datapath flow to see that the actions were executed for the packet.
AT_CHECK([ovs-appctl dpif/dump-flows br0 | strip_timers], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(frag=no), packets:1, bytes:64, used:0.0s, actions:1
])
# Wait for datapath flow expiration.
ovs-appctl time/stop
ovs-appctl time/warp 15000
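# (Idle datapath flows expire after max-idle, 10 s by default, so a 15 s
# warp should be more than enough.)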
ovs-appctl revalidator/wait
# Check that the flow was successfully deleted from HW.
OVS_WAIT_UNTIL([grep "succeed to delete netdev flow" ovs-vswitchd.log])
AT_CHECK([filter_hw_flow_del < ovs-vswitchd.log | strip_xout], [0], [dnl
p1: flow del: mark: 1
])
OVS_VSWITCHD_STOP
AT_CLEANUP])
DPIF_NETDEV_FLOW_HW_OFFLOAD([dummy])
DPIF_NETDEV_FLOW_HW_OFFLOAD([dummy-pmd])
m4_define([DPIF_NETDEV_FLOW_HW_OFFLOAD_OFFSETS],
[AT_SETUP([dpif-netdev - partial hw offload with packet modifications - $1])
OVS_VSWITCHD_START(
[add-port br0 p1 -- \
set interface p1 type=$1 ofport_request=1 options:pcap=p1.pcap options:ifindex=1101 -- \
set bridge br0 datapath-type=dummy \
other-config:datapath-id=1234 fail-mode=secure], [], [],
[m4_if([$1], [dummy-pmd], [--dummy-numa="0,0,0,0,1,1,1,1"], [])])
AT_CHECK([ovs-appctl vlog/set dpif:file:dbg dpif_netdev:file:dbg netdev_dummy:file:dbg])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:hw-offload=true])
OVS_WAIT_UNTIL([grep "netdev: Flow API Enabled" ovs-vswitchd.log])
AT_CHECK([ovs-ofctl del-flows br0])
# Set up a flow that modifies the IPv4 source address and UDP destination
# port to make sure that offloaded packets have correctly initialized
# L3/L4 offsets.
AT_CHECK([ovs-ofctl add-flow br0 in_port=1,udp,actions=mod_nw_src:192.168.0.7,mod_tp_dst:3773,output:IN_PORT])
packet="packet_type(ns=0,id=0),eth(src=00:06:07:08:09:0a,dst=00:01:02:03:04:05),eth_type(0x8100),vlan(vid=99,pcp=7),encap(eth_type(0x0800),ipv4(src=127.0.0.1,dst=127.0.0.1,proto=17,ttl=64,frag=no),udp(src=81,dst=82))"
AT_CHECK([ovs-appctl netdev-dummy/receive p1 $packet --len 64], [0])
OVS_WAIT_UNTIL([grep "miss upcall" ovs-vswitchd.log])
AT_CHECK([grep -A 1 'miss upcall' ovs-vswitchd.log | tail -n 1], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),dnl
packet_type(ns=0,id=0),eth(src=00:06:07:08:09:0a,dst=00:01:02:03:04:05),eth_type(0x8100),vlan(vid=99,pcp=7),encap(eth_type(0x0800),ipv4(src=127.0.0.1,dst=127.0.0.1,proto=17,tos=0,ttl=64,frag=no),udp(src=81,dst=82))
])
# Check that the flow was successfully offloaded.
OVS_WAIT_UNTIL([grep "succeed to add netdev flow" ovs-vswitchd.log])
AT_CHECK([filter_hw_flow_install < ovs-vswitchd.log | strip_xout], [0], [dnl
p1: flow put[[create]]: flow match: recirc_id=0,eth,udp,in_port=1,dl_vlan=99,dl_vlan_pcp=7,nw_src=127.0.0.1,nw_frag=no,tp_dst=82, mark: 1
])
# Check that the datapath flow was installed successfully.
AT_CHECK([filter_flow_install < ovs-vswitchd.log | strip_xout], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x8100),vlan(vid=99,pcp=7),encap(eth_type(0x0800),ipv4(src=127.0.0.1,proto=17,frag=no),udp(dst=82)), actions: <del>
])
# Inject the same packet again.
AT_CHECK([ovs-appctl netdev-dummy/receive p1 $packet --len 64], [0])
# Check for successful packet matching with the installed offloaded flow.
AT_CHECK([filter_hw_packet_netdev_dummy < ovs-vswitchd.log | strip_xout], [0], [dnl
p1: packet: udp,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=81,tp_dst=82 dnl
matches with flow: recirc_id=0,eth,udp,dl_vlan=99,dl_vlan_pcp=7,nw_src=127.0.0.1,nw_frag=no,tp_dst=82 with mark: 1
])
ovs-appctl revalidator/wait
# Dump the datapath flow to see that the actions were executed for the packet.
AT_CHECK([ovs-appctl dpif/dump-flows br0 | strip_timers], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x8100),vlan(vid=99,pcp=7),encap(eth_type(0x0800),ipv4(src=127.0.0.1,proto=17,frag=no),udp(dst=82)), dnl
packets:1, bytes:64, used:0.0s, actions:set(ipv4(src=192.168.0.7)),set(udp(dst=3773)),1
])
# Wait for datapath flow expiration.
ovs-appctl time/stop
ovs-appctl time/warp 15000
ovs-appctl revalidator/wait
# Check that the flow was successfully deleted from HW.
OVS_WAIT_UNTIL([grep "succeed to delete netdev flow" ovs-vswitchd.log])
AT_CHECK([filter_hw_flow_del < ovs-vswitchd.log | strip_xout], [0], [dnl
p1: flow del: mark: 1
])
# Check that the IP address and UDP port were correctly modified in output packets.
AT_CHECK([ovs-ofctl parse-pcap p1.pcap], [0], [dnl
udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=81,tp_dst=82
udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=192.168.0.7,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=81,tp_dst=3773
udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=127.0.0.1,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=81,tp_dst=82
udp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,nw_src=192.168.0.7,nw_dst=127.0.0.1,nw_tos=0,nw_ecn=0,nw_ttl=64,tp_src=81,tp_dst=3773
])
OVS_VSWITCHD_STOP
AT_CLEANUP])
DPIF_NETDEV_FLOW_HW_OFFLOAD_OFFSETS([dummy])
DPIF_NETDEV_FLOW_HW_OFFLOAD_OFFSETS([dummy-pmd])
m4_define([DPIF_NETDEV_FLOW_HW_OFFLOAD_OFFSETS_VID_ARP],
[AT_SETUP([dpif-netdev - partial hw offload with arp vlan id packet modifications - $1])
OVS_VSWITCHD_START(
[add-port br0 p1 -- \
set interface p1 type=$1 ofport_request=1 options:pcap=p1.pcap options:ifindex=1102 -- \
set bridge br0 datapath-type=dummy \
other-config:datapath-id=1234 fail-mode=secure], [], [],
[m4_if([$1], [dummy-pmd], [--dummy-numa="0,0,0,0,1,1,1,1"], [])])
AT_CHECK([ovs-appctl vlog/set dpif:file:dbg dpif_netdev:file:dbg netdev_dummy:file:dbg])
AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:hw-offload=true])
OVS_WAIT_UNTIL([grep "netdev: Flow API Enabled" ovs-vswitchd.log])
AT_CHECK([ovs-ofctl del-flows br0])
# Set up a flow that modifies the VLAN ID of ARP packets to make sure that
# offloaded packets have a correctly initialized L3 offset.
AT_CHECK([ovs-ofctl add-flow br0 in_port=1,arp,dl_vlan=99,actions=mod_vlan_vid=11,output:IN_PORT])
packet="packet_type(ns=0,id=0),eth(src=00:06:07:08:09:0a,dst=00:01:02:03:04:05),eth_type(0x8100),vlan(vid=99,pcp=7),encap(eth_type(0x0806),arp(sip=127.0.0.1,tip=127.0.0.1,op=1,sha=00:0b:0c:0d:0e:0f,tha=00:00:00:00:00:00))"
AT_CHECK([ovs-appctl netdev-dummy/receive p1 $packet --len 64], [0])
OVS_WAIT_UNTIL([grep "miss upcall" ovs-vswitchd.log])
AT_CHECK([grep -A 1 'miss upcall' ovs-vswitchd.log | tail -n 1], [0], [dnl
skb_priority(0),skb_mark(0),ct_state(0),ct_zone(0),ct_mark(0),ct_label(0),recirc_id(0),dp_hash(0),in_port(1),dnl
packet_type(ns=0,id=0),eth(src=00:06:07:08:09:0a,dst=00:01:02:03:04:05),eth_type(0x8100),vlan(vid=99,pcp=7),encap(eth_type(0x0806),arp(sip=127.0.0.1,tip=127.0.0.1,op=1,sha=00:0b:0c:0d:0e:0f,tha=00:00:00:00:00:00))
])
# Check that the flow was successfully offloaded.
OVS_WAIT_UNTIL([grep "succeed to add netdev flow" ovs-vswitchd.log])
AT_CHECK([filter_hw_flow_install < ovs-vswitchd.log | strip_xout], [0], [dnl
p1: flow put[[create]]: flow match: recirc_id=0,eth,arp,in_port=1,dl_vlan=99,dl_vlan_pcp=7, mark: 1
])
# Check that the datapath flow was installed successfully.
AT_CHECK([filter_flow_install < ovs-vswitchd.log | strip_xout], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x8100),vlan(vid=99,pcp=7),encap(eth_type(0x0806)), actions: <del>
])
# Inject the same packet again.
AT_CHECK([ovs-appctl netdev-dummy/receive p1 $packet --len 64], [0])
# Check for successful packet matching with the installed offloaded flow.
AT_CHECK([filter_hw_packet_netdev_dummy < ovs-vswitchd.log | strip_xout], [0], [dnl
p1: packet: arp,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,arp_spa=127.0.0.1,arp_tpa=127.0.0.1,arp_op=1,arp_sha=00:0b:0c:0d:0e:0f,arp_tha=00:00:00:00:00:00 dnl
matches with flow: recirc_id=0,eth,arp,dl_vlan=99,dl_vlan_pcp=7 with mark: 1
])
ovs-appctl revalidator/wait
# Dump the datapath flow to see that the actions were executed for the packet.
AT_CHECK([ovs-appctl dpif/dump-flows br0 | strip_timers], [0], [dnl
recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x8100),vlan(vid=99,pcp=7),encap(eth_type(0x0806)), dnl
packets:1, bytes:64, used:0.0s, actions:pop_vlan,push_vlan(vid=11,pcp=7),1
])
# Wait for datapath flow expiration.
ovs-appctl time/stop
ovs-appctl time/warp 15000
ovs-appctl revalidator/wait
# Check that the flow was successfully deleted from HW.
OVS_WAIT_UNTIL([grep "succeed to delete netdev flow" ovs-vswitchd.log])
AT_CHECK([filter_hw_flow_del < ovs-vswitchd.log | strip_xout], [0], [dnl
p1: flow del: mark: 1
])
# Check that the VLAN ID was correctly modified in output packets.
AT_CHECK([ovs-ofctl parse-pcap p1.pcap], [0], [dnl
arp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,arp_spa=127.0.0.1,arp_tpa=127.0.0.1,arp_op=1,arp_sha=00:0b:0c:0d:0e:0f,arp_tha=00:00:00:00:00:00
arp,in_port=ANY,dl_vlan=11,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,arp_spa=127.0.0.1,arp_tpa=127.0.0.1,arp_op=1,arp_sha=00:0b:0c:0d:0e:0f,arp_tha=00:00:00:00:00:00
arp,in_port=ANY,dl_vlan=99,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,arp_spa=127.0.0.1,arp_tpa=127.0.0.1,arp_op=1,arp_sha=00:0b:0c:0d:0e:0f,arp_tha=00:00:00:00:00:00
arp,in_port=ANY,dl_vlan=11,dl_vlan_pcp=7,vlan_tci1=0x0000,dl_src=00:06:07:08:09:0a,dl_dst=00:01:02:03:04:05,arp_spa=127.0.0.1,arp_tpa=127.0.0.1,arp_op=1,arp_sha=00:0b:0c:0d:0e:0f,arp_tha=00:00:00:00:00:00
])
OVS_VSWITCHD_STOP
AT_CLEANUP])
DPIF_NETDEV_FLOW_HW_OFFLOAD_OFFSETS_VID_ARP([dummy])
DPIF_NETDEV_FLOW_HW_OFFLOAD_OFFSETS_VID_ARP([dummy-pmd])
AT_SETUP([dpif-netdev - check dpctl/add-flow in_port exact match])
OVS_VSWITCHD_START(
[add-port br0 p1 \
-- set interface p1 type=dummy options:pstream=punix:$OVS_RUNDIR/p0.sock \
-- set bridge br0 datapath-type=dummy \
other-config:datapath-id=1234 fail-mode=secure])
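# The userspace datapath requires an exact match on in_port, so a flow
# installed via dpctl without one should be rejected with EINVAL instead
# of tripping an assertion.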
AT_CHECK([ovs-appctl dpctl/add-flow "eth(),eth_type(0x0800),ipv4()" "3"], [2],
[], [dnl
ovs-vswitchd: updating flow table (Invalid argument)
ovs-appctl: ovs-vswitchd: server returned an error
])
OVS_WAIT_UNTIL([grep "flow: in_port is not an exact match" ovs-vswitchd.log])
OVS_VSWITCHD_STOP(["/flow: in_port is not an exact match/d
/failed to put/d"])
AT_CLEANUP