2
0
mirror of https://github.com/openvswitch/ovs synced 2025-08-31 14:25:26 +00:00

dpif-netdev: Move rxq management into functions.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
Acked-by: Flavio Leitner <fbl@sysclose.org>
Signed-off-by: Daniele Di Proietto <diproiettod@vmware.com>
This commit is contained in:
Ilya Maximets
2016-02-08 18:30:30 +03:00
committed by Daniele Di Proietto
parent 762d146ab7
commit cc245ce87d

View File

@@ -497,6 +497,13 @@ dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp);
static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_pmd_clear_poll_list(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_del_port_from_pmd(struct dp_netdev_port *port,
struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp,
struct dp_netdev_port *port);
static void
dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port);
static void
dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_port *port, struct netdev_rxq *rx);
@@ -1176,35 +1183,7 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
cmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
if (netdev_is_pmd(netdev)) {
int numa_id = netdev_get_numa_id(netdev);
struct dp_netdev_pmd_thread *pmd;
struct hmapx to_reload;
struct hmapx_node *node;
hmapx_init(&to_reload);
/* Cannot create pmd threads for invalid numa node. */
ovs_assert(ovs_numa_numa_id_is_valid(numa_id));
for (i = 0; i < netdev_n_rxq(netdev); i++) {
pmd = dp_netdev_less_loaded_pmd_on_numa(dp, numa_id);
if (!pmd) {
/* There is no pmd threads on this numa node. */
dp_netdev_set_pmds_on_numa(dp, numa_id);
/* Assigning of rx queues done. */
break;
}
ovs_mutex_lock(&pmd->poll_mutex);
dp_netdev_add_rxq_to_pmd(pmd, port, port->rxq[i]);
ovs_mutex_unlock(&pmd->poll_mutex);
hmapx_add(&to_reload, pmd);
}
HMAPX_FOR_EACH (node, &to_reload) {
pmd = (struct dp_netdev_pmd_thread *) node->data;
dp_netdev_reload_pmd__(pmd);
}
hmapx_destroy(&to_reload);
dp_netdev_add_port_to_pmds(dp, port);
}
seq_change(dp->port_seq);
@@ -1392,29 +1371,7 @@ do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
if (!has_pmd_port_for_numa(dp, numa_id)) {
dp_netdev_del_pmds_on_numa(dp, numa_id);
} else {
struct dp_netdev_pmd_thread *pmd;
struct rxq_poll *poll, *next;
CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
if (pmd->numa_id == numa_id) {
bool found = false;
ovs_mutex_lock(&pmd->poll_mutex);
LIST_FOR_EACH_SAFE (poll, next, node, &pmd->poll_list) {
if (poll->port == port) {
found = true;
port_unref(poll->port);
list_remove(&poll->node);
pmd->poll_cnt--;
free(poll);
}
}
ovs_mutex_unlock(&pmd->poll_mutex);
if (found) {
dp_netdev_reload_pmd__(pmd);
}
}
}
dp_netdev_del_port_from_all_pmds(dp, port);
}
}
@@ -2936,8 +2893,6 @@ dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
static void
dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
{
struct rxq_poll *poll;
/* Uninit the 'flow_cache' since there is
* no actual thread uninit it for NON_PMD_CORE_ID. */
if (pmd->core_id == NON_PMD_CORE_ID) {
@@ -2950,10 +2905,7 @@ dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
}
/* Unref all ports and free poll_list. */
LIST_FOR_EACH_POP (poll, node, &pmd->poll_list) {
port_unref(poll->port);
free(poll);
}
dp_netdev_pmd_clear_poll_list(pmd);
/* Purges the 'pmd''s flows after stopping the thread, but before
* destroying the flows, so that the flow stats can be collected. */
@@ -3036,6 +2988,62 @@ dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id)
free(free_idx);
}
/* Drains 'pmd''s poll list: pops every rx-queue entry, drops the port
 * reference it holds, frees it, and resets the poll counter to zero.
 * Takes 'pmd->poll_mutex' for the duration of the drain. */
static void
dp_netdev_pmd_clear_poll_list(struct dp_netdev_pmd_thread *pmd)
{
    struct rxq_poll *entry;

    ovs_mutex_lock(&pmd->poll_mutex);
    LIST_FOR_EACH_POP (entry, node, &pmd->poll_list) {
        port_unref(entry->port);
        free(entry);
    }
    pmd->poll_cnt = 0;
    ovs_mutex_unlock(&pmd->poll_mutex);
}
/* Deletes all rx queues of 'port' from poll_list of pmd thread and
 * reloads it if poll_list was changed. */
static void
dp_netdev_del_port_from_pmd(struct dp_netdev_port *port,
struct dp_netdev_pmd_thread *pmd)
{
/* 'next' is required by the _SAFE iterator since entries are freed
 * while walking the list. */
struct rxq_poll *poll, *next;
/* Tracks whether at least one entry was removed, so the (expensive)
 * pmd reload only happens when the poll list actually changed. */
bool found = false;
ovs_mutex_lock(&pmd->poll_mutex);
LIST_FOR_EACH_SAFE (poll, next, node, &pmd->poll_list) {
if (poll->port == port) {
found = true;
/* Order matters: drop the port reference and unlink the node
 * before freeing the entry that contains them. */
port_unref(poll->port);
list_remove(&poll->node);
pmd->poll_cnt--;
free(poll);
}
}
ovs_mutex_unlock(&pmd->poll_mutex);
/* Reload outside the mutex; NOTE(review): dp_netdev_reload_pmd__
 * presumably must not be called under poll_mutex — confirm against
 * its definition elsewhere in this file. */
if (found) {
dp_netdev_reload_pmd__(pmd);
}
}
/* Walks every pmd thread in 'dp' and removes 'port''s rx queues from
 * those on the port's numa node, reloading any thread whose poll list
 * changed.  Threads on other numa nodes never poll this port, so they
 * are skipped. */
static void
dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp,
                                 struct dp_netdev_port *port)
{
    struct dp_netdev_pmd_thread *pmd;
    int port_numa = netdev_get_numa_id(port->netdev);

    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->numa_id != port_numa) {
            continue;
        }
        dp_netdev_del_port_from_pmd(port, pmd);
    }
}
/* Returns PMD thread from this numa node with fewer rx queues to poll.
* Returns NULL if there is no PMD threads on this numa node.
* Can be called safely only by main thread. */
@@ -3072,6 +3080,45 @@ dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
pmd->poll_cnt++;
}
/* Distributes all rx queues of 'port' between all PMD threads and reloads
 * them if needed. */
static void
dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port)
{
int numa_id = netdev_get_numa_id(port->netdev);
struct dp_netdev_pmd_thread *pmd;
/* Set of pmd threads whose poll list changed; reloads are deferred and
 * deduplicated so each thread is reloaded at most once. */
struct hmapx to_reload;
struct hmapx_node *node;
int i;
hmapx_init(&to_reload);
/* Cannot create pmd threads for invalid numa node. */
ovs_assert(ovs_numa_numa_id_is_valid(numa_id));
/* Assign each rx queue to the currently least-loaded pmd on the
 * port's numa node, rebalancing load queue by queue. */
for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
pmd = dp_netdev_less_loaded_pmd_on_numa(dp, numa_id);
if (!pmd) {
/* There is no pmd threads on this numa node. */
/* NOTE(review): presumably dp_netdev_set_pmds_on_numa() also
 * picks up this port's remaining queues when it creates the
 * threads, which is why the loop can stop here — confirm
 * against its definition. */
dp_netdev_set_pmds_on_numa(dp, numa_id);
/* Assigning of rx queues done. */
break;
}
ovs_mutex_lock(&pmd->poll_mutex);
dp_netdev_add_rxq_to_pmd(pmd, port, port->rxq[i]);
ovs_mutex_unlock(&pmd->poll_mutex);
hmapx_add(&to_reload, pmd);
}
/* Reload each affected pmd exactly once, after all queues are
 * assigned and all mutexes released. */
HMAPX_FOR_EACH (node, &to_reload) {
pmd = (struct dp_netdev_pmd_thread *) node->data;
dp_netdev_reload_pmd__(pmd);
}
hmapx_destroy(&to_reload);
}
/* Checks the numa node id of 'netdev' and starts pmd threads for
* the numa node. */
static void