
Check and allocate free qdisc queue id for ports with qos parameters

ovn-northd processes the list of Port_Bindings and hashes the list of
queues per chassis. When it finds a port with QoS parameters but without
a queue_id, it allocates a free queue for the chassis that the port
belongs to. The queue_id information is stored in the options field of
the Port_Binding table. ovn-northd also adds a set_queue action to
ingress table 0 of the logical flows, which ovn-controller translates to
the OpenFlow set_queue action.

ovn-controller opens the netdev corresponding to the tunnel interface's
status:tunnel_egress_iface value and configures an HTB qdisc on it. Then,
for each SB Port_Binding that has a queue_id set, it allocates a queue
with the QoS parameters of that port. It also frees up unused queues.

This patch replaces the older approach of ingress policing.

Signed-off-by: Babu Shanmugam <bschanmu@redhat.com>
Signed-off-by: Ben Pfaff <blp@ovn.org>
Authored by Babu Shanmugam on 2016-09-07 11:40:11 +05:30; committed by Ben Pfaff
parent 35f1085179
commit a6095f815e
9 changed files with 450 additions and 36 deletions
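
A usage sketch for the feature described above (not part of the commit; the
port name "lsp0" is hypothetical and the commands assume the generic
ovn-nbctl/ovn-sbctl database syntax):

    # QoS parameters are set in the northbound Logical_Switch_Port options;
    # rates are in bit/s and burst sizes in bits, per the documentation
    # changes below.
    ovn-nbctl set Logical_Switch_Port lsp0 \
        options:qos_max_rate=100000000 options:qos_burst=1000000

    # ovn-northd allocates a per-chassis queue id and stores it in the
    # southbound Port_Binding options; ovn-controller then creates the
    # matching HTB queue on the tunnel egress interface.
    ovn-sbctl get Port_Binding lsp0 options:qdisc_queue_id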

NEWS

@@ -1,5 +1,7 @@
 Post-v2.6.0
 ---------------------
+   - OVN:
+     * QoS is now implemented via egress shaping rather than ingress policing.
    - Fixed regression in table stats maintenance introduced in OVS
      2.3.0, wherein the number of OpenFlow table hits and misses was
      not accurate.


@@ -26,6 +26,7 @@
 #include "openvswitch/uuid.h"
 #include "util.h"
 
+struct expr;
 struct lexer;
 struct ofpbuf;
 struct shash;
@@ -66,7 +67,8 @@ struct simap;
     OVNACT(GET_ND, ovnact_get_mac_bind)       \
     OVNACT(PUT_ND, ovnact_put_mac_bind)       \
     OVNACT(PUT_DHCPV4_OPTS, ovnact_put_dhcp_opts) \
-    OVNACT(PUT_DHCPV6_OPTS, ovnact_put_dhcp_opts)
+    OVNACT(PUT_DHCPV6_OPTS, ovnact_put_dhcp_opts) \
+    OVNACT(SET_QUEUE, ovnact_set_queue)
 
 /* enum ovnact_type, with a member OVNACT_<ENUM> for each action. */
 enum OVS_PACKED_ENUM ovnact_type {
@@ -219,6 +221,19 @@ struct ovnact_put_dhcp_opts {
     size_t n_options;
 };
 
+/* Valid arguments to SET_QUEUE action.
+ *
+ * QDISC_MIN_QUEUE_ID is the default queue, so user-defined queues should
+ * start at QDISC_MIN_QUEUE_ID+1. */
+#define QDISC_MIN_QUEUE_ID 0
+#define QDISC_MAX_QUEUE_ID 0xf000
+
+/* OVNACT_SET_QUEUE. */
+struct ovnact_set_queue {
+    struct ovnact ovnact;
+    uint16_t queue_id;
+};
+
 /* Internal use by the helpers below. */
 void ovnact_init(struct ovnact *, enum ovnact_type, size_t len);
 void *ovnact_put(struct ofpbuf *, enum ovnact_type, size_t len);


@@ -22,6 +22,7 @@
 #include "lib/poll-loop.h"
 #include "lib/sset.h"
 #include "lib/util.h"
+#include "lib/netdev.h"
 #include "lib/vswitch-idl.h"
 #include "openvswitch/hmap.h"
 #include "openvswitch/vlog.h"
@@ -30,6 +31,15 @@
 
 VLOG_DEFINE_THIS_MODULE(binding);
 
+#define OVN_QOS_TYPE "linux-htb"
+
+struct qos_queue {
+    struct hmap_node node;
+    uint32_t queue_id;
+    uint32_t max_rate;
+    uint32_t burst;
+};
+
 void
 binding_register_ovs_idl(struct ovsdb_idl *ovs_idl)
 {
@@ -43,19 +53,22 @@ binding_register_ovs_idl(struct ovsdb_idl *ovs_idl)
     ovsdb_idl_add_table(ovs_idl, &ovsrec_table_port);
     ovsdb_idl_add_column(ovs_idl, &ovsrec_port_col_name);
     ovsdb_idl_add_column(ovs_idl, &ovsrec_port_col_interfaces);
+    ovsdb_idl_add_column(ovs_idl, &ovsrec_port_col_qos);
 
     ovsdb_idl_add_table(ovs_idl, &ovsrec_table_interface);
     ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_name);
     ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_external_ids);
-    ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_ingress_policing_rate);
-    ovsdb_idl_add_column(ovs_idl,
-                         &ovsrec_interface_col_ingress_policing_burst);
+    ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_status);
+
+    ovsdb_idl_add_table(ovs_idl, &ovsrec_table_qos);
+    ovsdb_idl_add_column(ovs_idl, &ovsrec_qos_col_type);
 }
 
 static void
 get_local_iface_ids(const struct ovsrec_bridge *br_int,
                     struct shash *lport_to_iface,
-                    struct sset *all_lports)
+                    struct sset *all_lports,
+                    struct sset *egress_ifaces)
 {
     int i;
@@ -73,11 +86,20 @@ get_local_iface_ids(const struct ovsrec_bridge *br_int,
             iface_rec = port_rec->interfaces[j];
             iface_id = smap_get(&iface_rec->external_ids, "iface-id");
-            if (!iface_id) {
-                continue;
+
+            if (iface_id) {
+                shash_add(lport_to_iface, iface_id, iface_rec);
+                sset_add(all_lports, iface_id);
+            }
+
+            /* Check if this is a tunnel interface. */
+            if (smap_get(&iface_rec->options, "remote_ip")) {
+                const char *tunnel_iface
+                    = smap_get(&iface_rec->status, "tunnel_egress_iface");
+                if (tunnel_iface) {
+                    sset_add(egress_ifaces, tunnel_iface);
+                }
             }
-            shash_add(lport_to_iface, iface_id, iface_rec);
-            sset_add(all_lports, iface_id);
         }
     }
 }
@@ -99,20 +121,166 @@ add_local_datapath(struct hmap *local_datapaths,
 }
 
 static void
-update_qos(const struct ovsrec_interface *iface_rec,
-           const struct sbrec_port_binding *pb)
+get_qos_params(const struct sbrec_port_binding *pb, struct hmap *queue_map)
 {
-    int rate = smap_get_int(&pb->options, "policing_rate", 0);
-    int burst = smap_get_int(&pb->options, "policing_burst", 0);
+    uint32_t max_rate = smap_get_int(&pb->options, "qos_max_rate", 0);
+    uint32_t burst = smap_get_int(&pb->options, "qos_burst", 0);
+    uint32_t queue_id = smap_get_int(&pb->options, "qdisc_queue_id", 0);
 
-    ovsrec_interface_set_ingress_policing_rate(iface_rec, MAX(0, rate));
-    ovsrec_interface_set_ingress_policing_burst(iface_rec, MAX(0, burst));
+    if ((!max_rate && !burst) || !queue_id) {
+        /* Qos is not configured for this port. */
+        return;
+    }
+
+    struct qos_queue *node = xzalloc(sizeof *node);
+    hmap_insert(queue_map, &node->node, hash_int(queue_id, 0));
+    node->max_rate = max_rate;
+    node->burst = burst;
+    node->queue_id = queue_id;
+}
+
+static const struct ovsrec_qos *
+get_noop_qos(struct controller_ctx *ctx)
+{
+    const struct ovsrec_qos *qos;
+    OVSREC_QOS_FOR_EACH (qos, ctx->ovs_idl) {
+        if (!strcmp(qos->type, "linux-noop")) {
+            return qos;
+        }
+    }
+
+    if (!ctx->ovs_idl_txn) {
+        return NULL;
+    }
+    qos = ovsrec_qos_insert(ctx->ovs_idl_txn);
+    ovsrec_qos_set_type(qos, "linux-noop");
+    return qos;
+}
+
+static bool
+set_noop_qos(struct controller_ctx *ctx, struct sset *egress_ifaces)
+{
+    if (!ctx->ovs_idl_txn) {
+        return false;
+    }
+
+    const struct ovsrec_qos *noop_qos = get_noop_qos(ctx);
+    if (!noop_qos) {
+        return false;
+    }
+
+    const struct ovsrec_port *port;
+    size_t count = 0;
+
+    OVSREC_PORT_FOR_EACH (port, ctx->ovs_idl) {
+        if (sset_contains(egress_ifaces, port->name)) {
+            ovsrec_port_set_qos(port, noop_qos);
+            count++;
+        }
+        if (sset_count(egress_ifaces) == count) {
+            break;
+        }
+    }
+    return true;
+}
+
+static void
+setup_qos(const char *egress_iface, struct hmap *queue_map)
+{
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
+    struct netdev *netdev_phy;
+
+    if (!egress_iface) {
+        /* Queues cannot be configured. */
+        return;
+    }
+
+    int error = netdev_open(egress_iface, NULL, &netdev_phy);
+    if (error) {
+        VLOG_WARN_RL(&rl, "%s: could not open netdev (%s)",
+                     egress_iface, ovs_strerror(error));
+        return;
+    }
+
+    /* Check and configure qdisc. */
+    const char *qdisc_type;
+    struct smap qdisc_details;
+
+    smap_init(&qdisc_details);
+    if (netdev_get_qos(netdev_phy, &qdisc_type, &qdisc_details) != 0 ||
+        qdisc_type[0] == '\0') {
+        /* Qos is not supported. */
+        return;
+    }
+    if (strcmp(qdisc_type, OVN_QOS_TYPE)) {
+        error = netdev_set_qos(netdev_phy, OVN_QOS_TYPE, &qdisc_details);
+        if (error) {
+            VLOG_WARN_RL(&rl, "%s: could not configure QoS (%s)",
+                         egress_iface, ovs_strerror(error));
+        }
+    }
+
+    /* Check and delete if needed. */
+    struct netdev_queue_dump dump;
+    unsigned int queue_id;
+    struct smap queue_details;
+    struct qos_queue *sb_info;
+    struct hmap consistent_queues;
+
+    smap_init(&queue_details);
+    hmap_init(&consistent_queues);
+    NETDEV_QUEUE_FOR_EACH (&queue_id, &queue_details, &dump, netdev_phy) {
+        bool is_queue_needed = false;
+
+        HMAP_FOR_EACH_WITH_HASH (sb_info, node, hash_int(queue_id, 0),
+                                 queue_map) {
+            is_queue_needed = true;
+            if (sb_info->max_rate ==
+                smap_get_int(&queue_details, "max-rate", 0)
+                && sb_info->burst == smap_get_int(&queue_details, "burst", 0)) {
+                /* This queue is consistent. */
+                hmap_insert(&consistent_queues, &sb_info->node,
+                            hash_int(queue_id, 0));
+                break;
+            }
+        }
+
+        if (!is_queue_needed) {
+            error = netdev_delete_queue(netdev_phy, queue_id);
+            if (error) {
+                VLOG_WARN_RL(&rl, "%s: could not delete queue %u (%s)",
+                             egress_iface, queue_id, ovs_strerror(error));
+            }
+        }
+    }
+
+    /* Create/Update queues. */
+    HMAP_FOR_EACH (sb_info, node, queue_map) {
+        if (hmap_contains(&consistent_queues, &sb_info->node)) {
+            hmap_remove(&consistent_queues, &sb_info->node);
+            continue;
+        }
+
+        smap_clear(&queue_details);
+        smap_add_format(&queue_details, "max-rate", "%d", sb_info->max_rate);
+        smap_add_format(&queue_details, "burst", "%d", sb_info->burst);
+        error = netdev_set_queue(netdev_phy, sb_info->queue_id,
+                                 &queue_details);
+        if (error) {
+            VLOG_WARN_RL(&rl, "%s: could not configure queue %u (%s)",
+                         egress_iface, sb_info->queue_id, ovs_strerror(error));
+        }
+    }
+    smap_destroy(&queue_details);
+    hmap_destroy(&consistent_queues);
+    netdev_close(netdev_phy);
 }
 
 static void
 consider_local_datapath(struct controller_ctx *ctx,
                         const struct sbrec_chassis *chassis_rec,
                         const struct sbrec_port_binding *binding_rec,
+                        struct hmap *qos_map,
                         struct hmap *local_datapaths,
                         struct shash *lport_to_iface,
                         struct sset *all_lports)
@@ -128,8 +296,8 @@ consider_local_datapath(struct controller_ctx *ctx,
         sset_add(all_lports, binding_rec->logical_port);
     }
     add_local_datapath(local_datapaths, binding_rec);
-    if (iface_rec && ctx->ovs_idl_txn) {
-        update_qos(iface_rec, binding_rec);
+    if (iface_rec && qos_map && ctx->ovs_idl_txn) {
+        get_qos_params(binding_rec, qos_map);
     }
     if (binding_rec->chassis == chassis_rec) {
         return;
@@ -204,14 +372,18 @@ binding_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int,
     const struct sbrec_chassis *chassis_rec;
     const struct sbrec_port_binding *binding_rec;
     struct shash lport_to_iface = SHASH_INITIALIZER(&lport_to_iface);
+    struct sset egress_ifaces = SSET_INITIALIZER(&egress_ifaces);
+    struct hmap qos_map;
 
     chassis_rec = get_chassis(ctx->ovnsb_idl, chassis_id);
     if (!chassis_rec) {
         return;
     }
 
+    hmap_init(&qos_map);
     if (br_int) {
-        get_local_iface_ids(br_int, &lport_to_iface, all_lports);
+        get_local_iface_ids(br_int, &lport_to_iface, all_lports,
+                            &egress_ifaces);
     }
 
     /* Run through each binding record to see if it is resident on this
@@ -219,11 +391,23 @@ binding_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int,
      * directly connected logical ports and children of those ports. */
     SBREC_PORT_BINDING_FOR_EACH(binding_rec, ctx->ovnsb_idl) {
         consider_local_datapath(ctx, chassis_rec, binding_rec,
-                                local_datapaths, &lport_to_iface,
+                                sset_is_empty(&egress_ifaces) ? NULL :
+                                &qos_map, local_datapaths, &lport_to_iface,
                                 all_lports);
+    }
+
+    if (!sset_is_empty(&egress_ifaces)
+        && set_noop_qos(ctx, &egress_ifaces)) {
+        const char *entry;
+
+        SSET_FOR_EACH (entry, &egress_ifaces) {
+            setup_qos(entry, &qos_map);
+        }
     }
 
     shash_destroy(&lport_to_iface);
+    sset_destroy(&egress_ifaces);
+    hmap_destroy(&qos_map);
 }
 
 /* Returns true if the database is all cleaned up, false if more work is


@@ -1614,6 +1614,46 @@ free_PUT_DHCPV6_OPTS(struct ovnact_put_dhcp_opts *pdo)
 {
     free_put_dhcp_opts(pdo);
 }
+
+static void
+parse_SET_QUEUE(struct action_context *ctx)
+{
+    int queue_id;
+
+    if (!lexer_force_match(ctx->lexer, LEX_T_LPAREN)
+        || !lexer_get_int(ctx->lexer, &queue_id)
+        || !lexer_force_match(ctx->lexer, LEX_T_RPAREN)) {
+        return;
+    }
+
+    if (queue_id < QDISC_MIN_QUEUE_ID || queue_id > QDISC_MAX_QUEUE_ID) {
+        lexer_error(ctx->lexer, "Queue ID %d for set_queue is "
+                    "not in valid range %d to %d.",
+                    queue_id, QDISC_MIN_QUEUE_ID, QDISC_MAX_QUEUE_ID);
+        return;
+    }
+
+    ovnact_put_SET_QUEUE(ctx->ovnacts)->queue_id = queue_id;
+}
+
+static void
+format_SET_QUEUE(const struct ovnact_set_queue *set_queue, struct ds *s)
+{
+    ds_put_format(s, "set_queue(%d);", set_queue->queue_id);
+}
+
+static void
+encode_SET_QUEUE(const struct ovnact_set_queue *set_queue,
+                 const struct ovnact_encode_params *ep OVS_UNUSED,
+                 struct ofpbuf *ofpacts)
+{
+    ofpact_put_SET_QUEUE(ofpacts)->queue_id = set_queue->queue_id;
+}
+
+static void
+free_SET_QUEUE(struct ovnact_set_queue *a OVS_UNUSED)
+{
+}
 
 /* Parses an assignment or exchange or put_dhcp_opts action. */
 static void
@@ -1685,6 +1725,8 @@ parse_action(struct action_context *ctx)
         parse_get_mac_bind(ctx, 128, ovnact_put_GET_ND(ctx->ovnacts));
     } else if (lexer_match_id(ctx->lexer, "put_nd")) {
         parse_put_mac_bind(ctx, 128, ovnact_put_PUT_ND(ctx->ovnacts));
+    } else if (lexer_match_id(ctx->lexer, "set_queue")) {
+        parse_SET_QUEUE(ctx);
     } else {
         lexer_syntax_error(ctx->lexer, "expecting action");
     }


@@ -32,6 +32,7 @@
 #include "ovn/lib/ovn-nb-idl.h"
 #include "ovn/lib/ovn-sb-idl.h"
 #include "ovn/lib/ovn-util.h"
+#include "ovn/actions.h"
 #include "packets.h"
 #include "poll-loop.h"
 #include "smap.h"
@@ -276,6 +277,86 @@ allocate_tnlid(struct hmap *set, const char *name, uint32_t max,
     return 0;
 }
 
+struct ovn_chassis_qdisc_queues {
+    struct hmap_node key_node;
+    uint32_t queue_id;
+    struct uuid chassis_uuid;
+};
+
+static void
+destroy_chassis_queues(struct hmap *set)
+{
+    struct ovn_chassis_qdisc_queues *node;
+    HMAP_FOR_EACH_POP (node, key_node, set) {
+        free(node);
+    }
+    hmap_destroy(set);
+}
+
+static void
+add_chassis_queue(struct hmap *set, struct uuid *chassis_uuid,
+                  uint32_t queue_id)
+{
+    struct ovn_chassis_qdisc_queues *node = xmalloc(sizeof *node);
+    node->queue_id = queue_id;
+    memcpy(&node->chassis_uuid, chassis_uuid, sizeof node->chassis_uuid);
+    hmap_insert(set, &node->key_node, uuid_hash(chassis_uuid));
+}
+
+static bool
+chassis_queueid_in_use(const struct hmap *set, struct uuid *chassis_uuid,
+                       uint32_t queue_id)
+{
+    const struct ovn_chassis_qdisc_queues *node;
+    HMAP_FOR_EACH_WITH_HASH (node, key_node, uuid_hash(chassis_uuid), set) {
+        if (uuid_equals(chassis_uuid, &node->chassis_uuid)
+            && node->queue_id == queue_id) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static uint32_t
+allocate_chassis_queueid(struct hmap *set, struct sbrec_chassis *chassis)
+{
+    for (uint32_t queue_id = QDISC_MIN_QUEUE_ID + 1;
+         queue_id <= QDISC_MAX_QUEUE_ID;
+         queue_id++) {
+        if (!chassis_queueid_in_use(set, &chassis->header_.uuid, queue_id)) {
+            add_chassis_queue(set, &chassis->header_.uuid, queue_id);
+            return queue_id;
+        }
+    }
+
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+    VLOG_WARN_RL(&rl, "all %s queue ids exhausted", chassis->name);
+    return 0;
+}
+
+static void
+free_chassis_queueid(struct hmap *set, struct sbrec_chassis *chassis,
+                     uint32_t queue_id)
+{
+    struct ovn_chassis_qdisc_queues *node;
+    HMAP_FOR_EACH_WITH_HASH (node, key_node,
+                             uuid_hash(&chassis->header_.uuid),
+                             set) {
+        if (uuid_equals(&chassis->header_.uuid, &node->chassis_uuid)
+            && node->queue_id == queue_id) {
+            hmap_remove(set, &node->key_node);
+            break;
+        }
+    }
+}
+
+static inline bool
+port_has_qos_params(const struct smap *opts)
+{
+    return (smap_get(opts, "qos_max_rate") ||
+            smap_get(opts, "qos_burst"));
+}
+
 /* The 'key' comes from nbs->header_.uuid or nbr->header_.uuid or
  * sb->external_ids:logical-switch. */
 struct ovn_datapath {
@@ -1056,6 +1137,7 @@ tag_alloc_create_new_tag(struct hmap *tag_alloc_table,
 static void
 join_logical_ports(struct northd_context *ctx,
                    struct hmap *datapaths, struct hmap *ports,
+                   struct hmap *chassis_qdisc_queues,
                    struct hmap *tag_alloc_table, struct ovs_list *sb_only,
                    struct ovs_list *nb_only, struct ovs_list *both)
 {
@@ -1088,6 +1170,15 @@ join_logical_ports(struct northd_context *ctx,
             }
             op->nbsp = nbsp;
             ovs_list_remove(&op->list);
+
+            uint32_t queue_id = smap_get_int(&op->sb->options,
+                                             "qdisc_queue_id", 0);
+            if (queue_id && op->sb->chassis) {
+                add_chassis_queue(
+                     chassis_qdisc_queues, &op->sb->chassis->header_.uuid,
+                     queue_id);
+            }
+
             ovs_list_push_back(both, &op->list);
 
             /* This port exists due to a SB binding, but should
@@ -1241,7 +1332,8 @@
 }
 
 static void
-ovn_port_update_sbrec(const struct ovn_port *op)
+ovn_port_update_sbrec(const struct ovn_port *op,
+                      struct hmap *chassis_qdisc_queues)
 {
     sbrec_port_binding_set_datapath(op->sb, op->od->sb);
     if (op->nbrp) {
@@ -1269,8 +1361,29 @@ ovn_port_update_sbrec(const struct ovn_port *op)
         sbrec_port_binding_set_mac(op->sb, NULL, 0);
     } else {
         if (strcmp(op->nbsp->type, "router")) {
+            uint32_t queue_id = smap_get_int(
+                    &op->sb->options, "qdisc_queue_id", 0);
+            bool has_qos = port_has_qos_params(&op->nbsp->options);
+            struct smap options;
+
+            if (op->sb->chassis && has_qos && !queue_id) {
+                queue_id = allocate_chassis_queueid(chassis_qdisc_queues,
+                                                    op->sb->chassis);
+            } else if (!has_qos && queue_id) {
+                free_chassis_queueid(chassis_qdisc_queues,
+                                     op->sb->chassis,
+                                     queue_id);
+                queue_id = 0;
+            }
+
+            smap_clone(&options, &op->nbsp->options);
+            if (queue_id) {
+                smap_add_format(&options,
+                                "qdisc_queue_id", "%d", queue_id);
+            }
+            sbrec_port_binding_set_options(op->sb, &options);
+            smap_destroy(&options);
             sbrec_port_binding_set_type(op->sb, op->nbsp->type);
-            sbrec_port_binding_set_options(op->sb, &op->nbsp->options);
         } else {
             const char *chassis = NULL;
             if (op->peer && op->peer->od && op->peer->od->nbr) {
@@ -1341,11 +1454,11 @@ build_ports(struct northd_context *ctx, struct hmap *datapaths,
             struct hmap *ports)
 {
     struct ovs_list sb_only, nb_only, both;
-    struct hmap tag_alloc_table;
-    hmap_init(&tag_alloc_table);
+    struct hmap tag_alloc_table = HMAP_INITIALIZER(&tag_alloc_table);
+    struct hmap chassis_qdisc_queues = HMAP_INITIALIZER(&chassis_qdisc_queues);
 
-    join_logical_ports(ctx, datapaths, ports, &tag_alloc_table, &sb_only,
-                       &nb_only, &both);
+    join_logical_ports(ctx, datapaths, ports, &chassis_qdisc_queues,
+                       &tag_alloc_table, &sb_only, &nb_only, &both);
 
     struct ovn_port *op, *next;
 
     /* For logical ports that are in both databases, update the southbound
@@ -1356,7 +1469,7 @@ build_ports(struct northd_context *ctx, struct hmap *datapaths,
         if (op->nbsp) {
             tag_alloc_create_new_tag(&tag_alloc_table, op->nbsp);
         }
-        ovn_port_update_sbrec(op);
+        ovn_port_update_sbrec(op, &chassis_qdisc_queues);
 
         add_tnlid(&op->od->port_tnlids, op->sb->tunnel_key);
         if (op->sb->tunnel_key > op->od->port_key_hint) {
@@ -1372,7 +1485,7 @@ build_ports(struct northd_context *ctx, struct hmap *datapaths,
         }
 
         op->sb = sbrec_port_binding_insert(ctx->ovnsb_txn);
-        ovn_port_update_sbrec(op);
+        ovn_port_update_sbrec(op, &chassis_qdisc_queues);
 
         sbrec_port_binding_set_logical_port(op->sb, op->key);
         sbrec_port_binding_set_tunnel_key(op->sb, tunnel_key);
@@ -1394,6 +1507,7 @@ build_ports(struct northd_context *ctx, struct hmap *datapaths,
     }
 
     tag_alloc_destroy(&tag_alloc_table);
+    destroy_chassis_queues(&chassis_qdisc_queues);
 }
 
 #define OVN_MIN_MULTICAST 32768
@@ -2653,11 +2767,18 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
         }
 
         ds_clear(&match);
+        ds_clear(&actions);
         ds_put_format(&match, "inport == %s", op->json_key);
         build_port_security_l2("eth.src", op->ps_addrs, op->n_ps_addrs,
                                &match);
+
+        const char *queue_id = smap_get(&op->sb->options, "qdisc_queue_id");
+        if (queue_id) {
+            ds_put_format(&actions, "set_queue(%s); ", queue_id);
+        }
+        ds_put_cstr(&actions, "next;");
         ovn_lflow_add(lflows, op->od, S_SWITCH_IN_PORT_SEC_L2, 50,
-                      ds_cstr(&match), "next;");
+                      ds_cstr(&match), ds_cstr(&actions));
 
         if (op->nbsp->n_port_security) {
             build_port_security_ip(P_IN, op, lflows);


@@ -292,14 +292,14 @@
           (empty string)
         </p>
 
-        <column name="options" key="policing_rate">
+        <column name="options" key="qos_max_rate">
           If set, indicates the maximum rate for data sent from this interface,
-          in kbps. Data exceeding this rate is dropped.
+          in bit/s. The traffic will be shaped according to this limit.
         </column>
 
-        <column name="options" key="policing_burst">
+        <column name="options" key="qos_burst">
           If set, indicates the maximum burst size for data sent from this
-          interface, in kb.
+          interface, in bits.
         </column>
       </group>
     </group>


@@ -1333,6 +1333,29 @@
         </p>
       </dd>
 
+      <dt>
+        <code>set_queue(<var>queue_number</var>);</code>
+      </dt>
+
+      <dd>
+        <p>
+          <b>Parameters</b>: Queue number <var>queue_number</var>, in the range 0 to 61440.
+        </p>
+
+        <p>
+          This is a logical equivalent of the OpenFlow <code>set_queue</code>
+          action. It affects packets that egress a hypervisor through a
+          physical interface. For nonzero <var>queue_number</var>, it
+          configures packet queuing to match the settings configured for the
+          <ref table="Port_Binding"/> with
+          <code>options:qdisc_queue_id</code> matching
+          <var>queue_number</var>. When <var>queue_number</var> is zero, it
+          resets queuing to the default strategy.
+        </p>
+
+        <p><b>Example:</b> <code>set_queue(10);</code></p>
+      </dd>
+
       <dt><code>ct_lb;</code></dt>
       <dt><code>ct_lb(</code><var>ip</var>[<code>:</code><var>port</var>]...<code>);</code></dt>
       <dd>
@@ -1858,14 +1881,21 @@ tcp.flags = RST;
           (empty string)
         </p>
 
-        <column name="options" key="policing_rate">
+        <column name="options" key="qos_max_rate">
           If set, indicates the maximum rate for data sent from this interface,
-          in kbps. Data exceeding this rate is dropped.
+          in bit/s. The traffic will be shaped according to this limit.
         </column>
 
-        <column name="options" key="policing_burst">
+        <column name="options" key="qos_burst">
           If set, indicates the maximum burst size for data sent from this
-          interface, in kb.
+          interface, in bits.
+        </column>
+
+        <column name="options" key="qdisc_queue_id"
+                type='{"type": "integer", "minInteger": 1, "maxInteger": 61440}'>
+          Indicates the queue number on the physical device. This is same as the
+          <code>queue_id</code> used in OpenFlow in <code>struct
+          ofp_action_enqueue</code>.
         </column>
       </group>


@@ -1289,6 +1289,18 @@ trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
         case OVNACT_PUT_DHCPV6_OPTS:
             execute_put_dhcp_opts(ovnact_get_PUT_DHCPV6_OPTS(a), uflow);
             break;
+
+        case OVNACT_SET_QUEUE:
+            /* The set_queue action is slippery from a logical perspective. It
+             * has no visible effect as long as the packet remains on the same
+             * chassis: it can bounce from one logical datapath to another
+             * retaining the queue and even end up at a VM on the same chassis.
+             * Without taking the physical arrangement into account, we can't
+             * do anything with this action other than just to note that it
+             * happened. If we ever add some physical knowledge to ovn-trace,
+             * though, it would be easy enough to track the queue information
+             * by adjusting uflow->skb_priority. */
+            break;
         }
     }


@@ -938,6 +938,14 @@ reg1[0] = put_dhcpv6_opts(ia_addr="ae70::4");
 reg1[0] = put_dhcpv6_opts(ia_addr=ae70::4, domain_search=ae70::1);
     DHCPv6 option domain_search requires string value.
 
+# set_queue
+set_queue(0);
+    encodes as set_queue:0
+set_queue(61440);
+    encodes as set_queue:61440
+set_queue(65535);
+    Queue ID 65535 for set_queue is not in valid range 0 to 61440.
+
 # Contradictionary prerequisites (allowed but not useful):
 ip4.src = ip6.src[0..31];
     encodes as move:NXM_NX_IPV6_SRC[0..31]->NXM_OF_IP_SRC[]