net: sched: RED: Introduce an ECN nodrop mode

When the RED Qdisc is configured to enable ECN, the RED algorithm decides
whether a given SKB should be marked. If that SKB is not ECN-capable, it is
early-dropped instead.

It is also possible to keep all traffic in the queue, and just mark the
ECN-capable subset of it, as appropriate under the RED algorithm. Some
switches support this mode, and some installations make use of it.
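
For illustration, a minimal userspace sketch of the per-packet outcome once RED
has decided to mark, with and without this mode. The enum and function name are
hypothetical; the real logic lives in red_enqueue() in the diff further down.

#include <stdbool.h>
#include <stdio.h>

enum outcome { MARK, EARLY_DROP, ENQUEUE_UNMARKED };

static enum outcome red_mark_outcome(bool ecn_mode, bool nodrop_mode, bool pkt_is_ect)
{
	if (!ecn_mode)
		return EARLY_DROP;		/* classic RED: drop instead of marking */
	if (pkt_is_ect)
		return MARK;			/* ECN-capable: set CE, keep the packet */
	return nodrop_mode ? ENQUEUE_UNMARKED	/* nodrop: keep non-ECT traffic queued */
			   : EARLY_DROP;	/* plain ECN mode: early-drop non-ECT traffic */
}

int main(void)
{
	printf("ecn, non-ECT packet:        %d\n", red_mark_outcome(true, false, false)); /* 1 = EARLY_DROP */
	printf("ecn nodrop, non-ECT packet: %d\n", red_mark_outcome(true, true, false));  /* 2 = ENQUEUE_UNMARKED */
	printf("ecn nodrop, ECT packet:     %d\n", red_mark_outcome(true, true, true));   /* 0 = MARK */
	return 0;
}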

To that end, add a new RED flag, TC_RED_NODROP. When the Qdisc is
configured with this flag, non-ECT traffic is enqueued instead of being
early-dropped.
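
As a hedged sketch of the flag semantics: the flag values below come from the
uapi hunk in this patch, and check_red_flags() is a made-up userspace mirror of
the kernel's red_validate_flags(), not part of the change itself.

#include <stdio.h>

#define TC_RED_ECN		1
#define TC_RED_HARDDROP		2
#define TC_RED_ADAPTATIVE	4
#define TC_RED_NODROP		8	/* new flag added by this patch */

/* Hypothetical userspace mirror of red_validate_flags(). */
static int check_red_flags(unsigned int flags)
{
	/* Nodrop only affects non-ECT traffic that RED chose to mark, so it
	 * is meaningless (and rejected) without ECN.
	 */
	if ((flags & TC_RED_NODROP) && !(flags & TC_RED_ECN))
		return -1;
	return 0;
}

int main(void)
{
	printf("ECN|NODROP:   %s\n", check_red_flags(TC_RED_ECN | TC_RED_NODROP) ? "rejected" : "accepted");
	printf("NODROP alone: %s\n", check_red_flags(TC_RED_NODROP) ? "rejected" : "accepted");
	return 0;
}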

Signed-off-by: Petr Machata <petrm@mellanox.com>
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Petr Machata 2020-03-13 01:10:57 +02:00 committed by David S. Miller
parent 14bc175d9c
commit 0a7fad2376
4 changed files with 32 additions and 6 deletions

include/net/pkt_cls.h

@@ -740,6 +740,7 @@ struct tc_red_qopt_offload_params {
 	u32 limit;
 	bool is_ecn;
 	bool is_harddrop;
+	bool is_nodrop;
 	struct gnet_stats_queue *qstats;
 };
 

include/net/red.h

@@ -209,6 +209,11 @@ static inline int red_get_flags(unsigned char qopt_flags,
 static inline int red_validate_flags(unsigned char flags,
 				     struct netlink_ext_ack *extack)
 {
+	if ((flags & TC_RED_NODROP) && !(flags & TC_RED_ECN)) {
+		NL_SET_ERR_MSG_MOD(extack, "nodrop mode is only meaningful with ECN");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 

include/uapi/linux/pkt_sched.h

@@ -286,6 +286,7 @@ struct tc_red_qopt {
 #define TC_RED_ECN		1
 #define TC_RED_HARDDROP		2
 #define TC_RED_ADAPTATIVE	4
+#define TC_RED_NODROP		8
 };
 
 #define TC_RED_HISTORIC_FLAGS (TC_RED_ECN | TC_RED_HARDDROP | TC_RED_ADAPTATIVE)

net/sched/sch_red.c

@@ -48,7 +48,7 @@ struct red_sched_data {
 	struct Qdisc		*qdisc;
 };
 
-static const u32 red_supported_flags = TC_RED_HISTORIC_FLAGS;
+static const u32 red_supported_flags = TC_RED_HISTORIC_FLAGS | TC_RED_NODROP;
 
 static inline int red_use_ecn(struct red_sched_data *q)
 {
@@ -60,6 +60,11 @@ static inline int red_use_harddrop(struct red_sched_data *q)
 	return q->flags & TC_RED_HARDDROP;
 }
 
+static int red_use_nodrop(struct red_sched_data *q)
+{
+	return q->flags & TC_RED_NODROP;
+}
+
 static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
@@ -80,23 +85,36 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 	case RED_PROB_MARK:
 		qdisc_qstats_overlimit(sch);
-		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+		if (!red_use_ecn(q)) {
 			q->stats.prob_drop++;
 			goto congestion_drop;
 		}
 
-		q->stats.prob_mark++;
+		if (INET_ECN_set_ce(skb)) {
+			q->stats.prob_mark++;
+		} else if (!red_use_nodrop(q)) {
+			q->stats.prob_drop++;
+			goto congestion_drop;
+		}
+
+		/* Non-ECT packet in ECN nodrop mode: queue it. */
 		break;
 
 	case RED_HARD_MARK:
 		qdisc_qstats_overlimit(sch);
-		if (red_use_harddrop(q) || !red_use_ecn(q) ||
-		    !INET_ECN_set_ce(skb)) {
+		if (red_use_harddrop(q) || !red_use_ecn(q)) {
 			q->stats.forced_drop++;
 			goto congestion_drop;
 		}
 
-		q->stats.forced_mark++;
+		if (INET_ECN_set_ce(skb)) {
+			q->stats.forced_mark++;
+		} else if (!red_use_nodrop(q)) {
+			q->stats.forced_drop++;
+			goto congestion_drop;
+		}
+
+		/* Non-ECT packet in ECN nodrop mode: queue it. */
 		break;
 	}
 
@@ -171,6 +189,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
 		opt.set.limit = q->limit;
 		opt.set.is_ecn = red_use_ecn(q);
 		opt.set.is_harddrop = red_use_harddrop(q);
+		opt.set.is_nodrop = red_use_nodrop(q);
 		opt.set.qstats = &sch->qstats;
 	} else {
 		opt.command = TC_RED_DESTROY;
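
For context only, not part of this patch: a hypothetical driver handling
TC_SETUP_QDISC_RED might propagate the new bit roughly as sketched below. All
foo_* names are invented; only the tc_red_qopt_offload commands and the
p->set fields used here come from the offload API above.

static int foo_setup_red(struct foo_port *port, struct tc_red_qopt_offload *p)
{
	switch (p->command) {
	case TC_RED_REPLACE:
		/* In nodrop mode the hardware should ECN-mark eligible
		 * packets but never early-drop; non-ECT packets are queued
		 * unmarked instead of being dropped.
		 */
		return foo_hw_red_configure(port, p->set.min, p->set.max,
					    p->set.probability,
					    p->set.is_ecn, p->set.is_harddrop,
					    p->set.is_nodrop);
	case TC_RED_DESTROY:
		return foo_hw_red_disable(port);
	default:
		return -EOPNOTSUPP;
	}
}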