mirror of
https://github.com/openvswitch/ovs
synced 2025-08-31 14:25:26 +00:00
dpdk: Ditch MAX_PKT_BURST macro.
The MAX_PKT_BURST and NETDEV_MAX_RX_BATCH macros had a confusing relationship. They basically purported to do the same thing, making it unclear which was the source of truth. Furthermore, while NETDEV_MAX_RX_BATCH was 256, MAX_PKT_BURST was 32, meaning we never processed a batch larger than 32 packets, further adding to the confusion. This patch resolves the issue by removing MAX_PKT_BURST completely, and shrinking the new NETDEV_MAX_BURST macro to only 32. This should have no change in the execution path except shrinking a couple of structs and memory allocations (can't hurt). Signed-off-by: Ethan Jackson <ethan@nicira.com> Acked-by: Daniele Di Proietto <diproiettod@vmware.com>
This commit is contained in:
@@ -2508,7 +2508,7 @@ dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
|
||||
struct dp_netdev_port *port,
|
||||
struct netdev_rxq *rxq)
|
||||
{
|
||||
struct dp_packet *packets[NETDEV_MAX_RX_BATCH];
|
||||
struct dp_packet *packets[NETDEV_MAX_BURST];
|
||||
int error, cnt;
|
||||
|
||||
cycles_count_start(pmd);
|
||||
@@ -3035,7 +3035,7 @@ struct packet_batch {
|
||||
|
||||
struct dp_netdev_flow *flow;
|
||||
|
||||
struct dp_packet *packets[NETDEV_MAX_RX_BATCH];
|
||||
struct dp_packet *packets[NETDEV_MAX_BURST];
|
||||
};
|
||||
|
||||
static inline void
|
||||
@@ -3159,7 +3159,7 @@ fast_path_processing(struct dp_netdev_pmd_thread *pmd,
|
||||
const size_t PKT_ARRAY_SIZE = cnt;
|
||||
#else
|
||||
/* Sparse or MSVC doesn't like variable length array. */
|
||||
enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
|
||||
enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
|
||||
#endif
|
||||
struct dpcls_rule *rules[PKT_ARRAY_SIZE];
|
||||
struct dp_netdev *dp = pmd->dp;
|
||||
@@ -3285,7 +3285,7 @@ dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
|
||||
const size_t PKT_ARRAY_SIZE = cnt;
|
||||
#else
|
||||
/* Sparse or MSVC doesn't like variable length array. */
|
||||
enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
|
||||
enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
|
||||
#endif
|
||||
struct netdev_flow_key keys[PKT_ARRAY_SIZE];
|
||||
struct packet_batch batches[PKT_ARRAY_SIZE];
|
||||
@@ -3382,7 +3382,7 @@ dp_execute_cb(void *aux_, struct dp_packet **packets, int cnt,
|
||||
|
||||
case OVS_ACTION_ATTR_TUNNEL_PUSH:
|
||||
if (*depth < MAX_RECIRC_DEPTH) {
|
||||
struct dp_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
|
||||
struct dp_packet *tnl_pkt[NETDEV_MAX_BURST];
|
||||
int err;
|
||||
|
||||
if (!may_steal) {
|
||||
@@ -3408,7 +3408,7 @@ dp_execute_cb(void *aux_, struct dp_packet **packets, int cnt,
|
||||
|
||||
p = dp_netdev_lookup_port(dp, portno);
|
||||
if (p) {
|
||||
struct dp_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
|
||||
struct dp_packet *tnl_pkt[NETDEV_MAX_BURST];
|
||||
int err;
|
||||
|
||||
if (!may_steal) {
|
||||
@@ -3470,7 +3470,7 @@ dp_execute_cb(void *aux_, struct dp_packet **packets, int cnt,
|
||||
|
||||
case OVS_ACTION_ATTR_RECIRC:
|
||||
if (*depth < MAX_RECIRC_DEPTH) {
|
||||
struct dp_packet *recirc_pkts[NETDEV_MAX_RX_BATCH];
|
||||
struct dp_packet *recirc_pkts[NETDEV_MAX_BURST];
|
||||
|
||||
if (!may_steal) {
|
||||
dp_netdev_clone_pkt_batch(recirc_pkts, packets, cnt);
|
||||
@@ -3829,7 +3829,7 @@ dpcls_lookup(const struct dpcls *cls, const struct netdev_flow_key keys[],
|
||||
#if !defined(__CHECKER__) && !defined(_WIN32)
|
||||
const int N_MAPS = DIV_ROUND_UP(cnt, MAP_BITS);
|
||||
#else
|
||||
enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_RX_BATCH, MAP_BITS) };
|
||||
enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_BURST, MAP_BITS) };
|
||||
#endif
|
||||
map_type maps[N_MAPS];
|
||||
struct dpcls_subtable *subtable;
|
||||
|
@@ -99,8 +99,6 @@ BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
|
||||
#define TX_HTHRESH 0 /* Default values of TX host threshold reg. */
|
||||
#define TX_WTHRESH 0 /* Default values of TX write-back threshold reg. */
|
||||
|
||||
#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
|
||||
|
||||
/* Character device cuse_dev_name. */
|
||||
static char *cuse_dev_name = NULL;
|
||||
|
||||
@@ -862,7 +860,7 @@ netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
|
||||
nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
|
||||
vhost_dev->dpdk_mp->mp,
|
||||
(struct rte_mbuf **)packets,
|
||||
MAX_PKT_BURST);
|
||||
NETDEV_MAX_BURST);
|
||||
if (!nb_rx) {
|
||||
return EAGAIN;
|
||||
}
|
||||
@@ -889,8 +887,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
|
||||
|
||||
nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
|
||||
(struct rte_mbuf **) packets,
|
||||
MIN((int) NETDEV_MAX_RX_BATCH,
|
||||
(int) MAX_PKT_BURST));
|
||||
NETDEV_MAX_BURST);
|
||||
if (!nb_rx) {
|
||||
return EAGAIN;
|
||||
}
|
||||
@@ -1008,7 +1005,7 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
|
||||
const size_t PKT_ARRAY_SIZE = cnt;
|
||||
#else
|
||||
/* Sparse or MSVC doesn't like variable length array. */
|
||||
enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
|
||||
enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
|
||||
#endif
|
||||
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
|
||||
struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
|
||||
|
@@ -338,7 +338,7 @@ typedef void netdev_dump_queue_stats_cb(unsigned int queue_id,
|
||||
int netdev_dump_queue_stats(const struct netdev *,
|
||||
netdev_dump_queue_stats_cb *, void *aux);
|
||||
|
||||
enum { NETDEV_MAX_RX_BATCH = 256 }; /* Maximum number packets in rx_recv() batch. */
|
||||
enum { NETDEV_MAX_BURST = 32 }; /* Maximum number packets in a batch. */
|
||||
extern struct seq *tnl_conf_seq;
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
Reference in New Issue
Block a user