Mirror of https://github.com/openvswitch/ovs

dpif-netdev: Change definitions of 'idle' & 'processing' cycles

Instead of counting all polling cycles as processing cycles, only count
the cycles where packets were received from the polling.

Signed-off-by: Georg Schmuecking <georg.schmuecking@ericsson.com>
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Co-authored-by: Georg Schmuecking <georg.schmuecking@ericsson.com>
Acked-by: Kevin Traynor <ktraynor@redhat.com>
Acked-by: Ian Stokes <ian.stokes@intel.com>
Tested-by: Ian Stokes <ian.stokes@intel.com>
Acked-by: Darrell Ball <dlu998@gmail.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Author:    Ciara Loftus
Date:      2017-02-20 12:53:00 +00:00
Committed: Ben Pfaff
Parent:    a91c4cfaf8
Commit:    a2ac666d52
2 changed files with 48 additions and 15 deletions

lib/dpif-netdev.c

@@ -326,8 +326,9 @@ enum dp_stat_type {
 };

 enum pmd_cycles_counter_type {
-    PMD_CYCLES_POLLING,         /* Cycles spent polling NICs. */
-    PMD_CYCLES_PROCESSING,      /* Cycles spent processing packets */
+    PMD_CYCLES_IDLE,            /* Cycles spent idle or unsuccessful polling */
+    PMD_CYCLES_PROCESSING,      /* Cycles spent successfully polling and
+                                 * processing polled packets */
     PMD_N_CYCLES
 };
@@ -804,10 +805,10 @@ pmd_info_show_stats(struct ds *reply,
     }

     ds_put_format(reply,
-                  "\tpolling cycles:%"PRIu64" (%.02f%%)\n"
+                  "\tidle cycles:%"PRIu64" (%.02f%%)\n"
                   "\tprocessing cycles:%"PRIu64" (%.02f%%)\n",
-                  cycles[PMD_CYCLES_POLLING],
-                  cycles[PMD_CYCLES_POLLING] / (double)total_cycles * 100,
+                  cycles[PMD_CYCLES_IDLE],
+                  cycles[PMD_CYCLES_IDLE] / (double)total_cycles * 100,
                   cycles[PMD_CYCLES_PROCESSING],
                   cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100);
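After this patch, the relevant lines of "ovs-appctl dpif-netdev/pmd-stats-show" output look like the following. The numbers are purely illustrative; only the labels and layout come from the format strings above:

	idle cycles:52331054 (78.16%)
	processing cycles:14624925 (21.84%)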
@@ -3079,30 +3080,43 @@ cycles_count_end(struct dp_netdev_pmd_thread *pmd,
     non_atomic_ullong_add(&pmd->cycles.n[type], interval);
 }

-static void
+/* Calculate the intermediate cycle result and add to the counter 'type' */
+static inline void
+cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
+                          enum pmd_cycles_counter_type type)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+    unsigned long long new_cycles = cycles_counter();
+    unsigned long long interval = new_cycles - pmd->last_cycles;
+    pmd->last_cycles = new_cycles;
+
+    non_atomic_ullong_add(&pmd->cycles.n[type], interval);
+}
+
+static int
 dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
                            struct netdev_rxq *rx,
                            odp_port_t port_no)
 {
     struct dp_packet_batch batch;
     int error;
+    int batch_cnt = 0;

     dp_packet_batch_init(&batch);
-    cycles_count_start(pmd);
     error = netdev_rxq_recv(rx, &batch);
-    cycles_count_end(pmd, PMD_CYCLES_POLLING);
     if (!error) {
         *recirc_depth_get() = 0;

-        cycles_count_start(pmd);
+        batch_cnt = batch.count;
         dp_netdev_input(pmd, &batch, port_no);
-        cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
     } else if (error != EAGAIN && error != EOPNOTSUPP) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

         VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
                     netdev_rxq_get_name(rx), ovs_strerror(error));
     }

+    return batch_cnt;
 }

 static struct tx_port *
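The pattern that cycles_count_intermediate() enables is easiest to see in isolation. Below is a minimal, self-contained C sketch, not OVS code: fake_tsc() stands in for the TSC-based cycles_counter(), and all names are invented for illustration. One running timestamp is kept, and each interval between checkpoints is credited to exactly one counter, so idle plus processing always equals the total cycles elapsed.

#include <stdint.h>
#include <stdio.h>

enum cycle_type { CYCLES_IDLE, CYCLES_PROCESSING, N_CYCLE_TYPES };

static uint64_t counters[N_CYCLE_TYPES];   /* per-thread in the real code */
static uint64_t last_cycles;

/* Stand-in for cycles_counter(); a real PMD thread reads the TSC. */
static uint64_t
fake_tsc(void)
{
    static uint64_t t;
    return t += 100;                       /* pretend each call is 100 cycles later */
}

static void
count_start(void)
{
    last_cycles = fake_tsc();
}

/* Credit everything since the last checkpoint to 'type' and restart the
 * interval, mirroring cycles_count_intermediate(). */
static void
count_intermediate(enum cycle_type type)
{
    uint64_t now = fake_tsc();

    counters[type] += now - last_cycles;
    last_cycles = now;
}

int
main(void)
{
    int i;

    count_start();
    for (i = 0; i < 8; i++) {
        int batch_cnt = (i % 4 == 0);      /* pretend 1 poll in 4 receives packets */
        count_intermediate(batch_cnt ? CYCLES_PROCESSING : CYCLES_IDLE);
    }
    printf("idle=%llu processing=%llu\n",
           (unsigned long long) counters[CYCLES_IDLE],
           (unsigned long long) counters[CYCLES_PROCESSING]);
    return 0;
}

Run as written, it prints idle=600 processing=200: every poll interval is accounted for, but only iterations that actually received a batch count as processing, which is exactly the reclassification this commit makes.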
@@ -3565,21 +3579,29 @@ dpif_netdev_run(struct dpif *dpif)
     struct dp_netdev *dp = get_dp_netdev(dpif);
     struct dp_netdev_pmd_thread *non_pmd;
     uint64_t new_tnl_seq;
+    int process_packets = 0;

     ovs_mutex_lock(&dp->port_mutex);
     non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
     if (non_pmd) {
         ovs_mutex_lock(&dp->non_pmd_mutex);
+        cycles_count_start(non_pmd);
         HMAP_FOR_EACH (port, node, &dp->ports) {
             if (!netdev_is_pmd(port->netdev)) {
                 int i;

                 for (i = 0; i < port->n_rxq; i++) {
-                    dp_netdev_process_rxq_port(non_pmd, port->rxqs[i].rx,
-                                               port->port_no);
+                    process_packets =
+                        dp_netdev_process_rxq_port(non_pmd,
+                                                   port->rxqs[i].rx,
+                                                   port->port_no);
+                    cycles_count_intermediate(non_pmd, process_packets ?
+                                                       PMD_CYCLES_PROCESSING
+                                                     : PMD_CYCLES_IDLE);
                 }
             }
         }
+        cycles_count_end(non_pmd, PMD_CYCLES_IDLE);
         dpif_netdev_xps_revalidate_pmd(non_pmd, time_msec(), false);
         ovs_mutex_unlock(&dp->non_pmd_mutex);
@@ -3704,6 +3726,7 @@ pmd_thread_main(void *f_)
     bool exiting;
     int poll_cnt;
     int i;
+    int process_packets = 0;

     poll_list = NULL;
@@ -3730,10 +3753,15 @@ reload:
         lc = UINT_MAX;
     }

+    cycles_count_start(pmd);
     for (;;) {
         for (i = 0; i < poll_cnt; i++) {
-            dp_netdev_process_rxq_port(pmd, poll_list[i].rx,
-                                       poll_list[i].port_no);
+            process_packets =
+                dp_netdev_process_rxq_port(pmd, poll_list[i].rx,
+                                           poll_list[i].port_no);
+            cycles_count_intermediate(pmd,
+                                      process_packets ? PMD_CYCLES_PROCESSING
+                                                      : PMD_CYCLES_IDLE);
         }

         if (lc++ > 1024) {
@@ -3754,6 +3782,8 @@ reload:
         }
     }

+    cycles_count_end(pmd, PMD_CYCLES_IDLE);
+
     poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
     exiting = latch_is_set(&pmd->exit_latch);
     /* Signal here to make sure the pmd finishes

vswitchd/ovs-vswitchd.8.in

@@ -269,7 +269,10 @@ The sum of ``emc hits'', ``masked hits'' and ``miss'' is the number of
 packets received by the datapath. Cycles are counted using the TSC or similar
 facilities (when available on the platform). To reset these counters use
 \fBdpif-netdev/pmd-stats-clear\fR. The duration of one cycle depends on the
-measuring infrastructure.
+measuring infrastructure. ``idle cycles'' refers to cycles spent polling
+devices but not receiving any packets. ``processing cycles'' refers to cycles
+spent polling devices and successfully receiving packets, plus the cycles
+spent processing said packets.
 .IP "\fBdpif-netdev/pmd-stats-clear\fR [\fIdp\fR]"
 Resets to zero the per pmd thread performance numbers shown by the
 \fBdpif-netdev/pmd-stats-show\fR command. It will NOT reset datapath or
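In practice the two commands pair naturally: clear the counters, let traffic run for the interval of interest, then read the idle/processing split. A hypothetical session (the commands are the ones documented above; the 10-second wait is arbitrary):

$ ovs-appctl dpif-netdev/pmd-stats-clear
$ sleep 10
$ ovs-appctl dpif-netdev/pmd-stats-show | grep cycles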