Message ID | 0124A7CD-1C46-4BC2-A18C-9B03DD57B8B8@itu.dk |
---|---|
State | Superseded |
Headers | show |
Series | net: sched: Add support for packet bursting. | expand |
On Fri, Jun 25, 2021 at 5:03 AM Niclas Hedam <nhed@itu.dk> wrote: > diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h > index 79a699f106b1..826d1dee6601 100644 > --- a/include/uapi/linux/pkt_sched.h > +++ b/include/uapi/linux/pkt_sched.h > @@ -594,6 +594,7 @@ enum { > TCA_NETEM_DELAY_DIST, > TCA_NETEM_REORDER, > TCA_NETEM_CORRUPT, > + TCA_NETEM_BURSTING, > TCA_NETEM_LOSS, > TCA_NETEM_RATE, > TCA_NETEM_ECN, You can't add a new enum in the middle, as it is UAPI. Thanks.
Hi Cong, Good point. Here is a new patch. From 71843907bdb9cdc4e24358f0c16a8778f2762dc7 Mon Sep 17 00:00:00 2001 From: Niclas Hedam <nhed@itu.dk> Date: Fri, 25 Jun 2021 13:37:18 +0200 Subject: [PATCH] net: sched: Add support for packet bursting. This commit implements packet bursting in the NetEm scheduler. This allows system administrators to hold back outgoing packets and release them at a multiple of a time quantum. This feature can be used to prevent timing attacks caused by network latency. Signed-off-by: Niclas Hedam <niclas@hed.am> --- include/uapi/linux/pkt_sched.h | 2 ++ net/sched/sch_netem.c | 24 +++++++++++++++++++++--- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 79a699f106b1..1ba49f141dae 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -603,6 +603,7 @@ enum { TCA_NETEM_JITTER64, TCA_NETEM_SLOT, TCA_NETEM_SLOT_DIST, + TCA_NETEM_BURSTING, __TCA_NETEM_MAX, }; @@ -615,6 +616,7 @@ struct tc_netem_qopt { __u32 gap; /* re-ordering gap (0 for none) */ __u32 duplicate; /* random packet dup (0=none ~0=100%) */ __u32 jitter; /* random jitter in latency (us) */ + __u32 bursting; /* send packets in bursts (us) */ }; struct tc_netem_corr { diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 0c345e43a09a..52d796287b86 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -85,6 +85,7 @@ struct netem_sched_data { s64 latency; s64 jitter; + u32 bursting; u32 loss; u32 ecn; u32 limit; @@ -467,7 +468,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* If a delay is expected, orphan the skb. 
(orphaning usually takes * place at TX completion time, so _before_ the link transit delay) */ - if (q->latency || q->jitter || q->rate) + if (q->latency || q->jitter || q->rate || q->bursting) skb_orphan_partial(skb); /* @@ -527,8 +528,17 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, qdisc_qstats_backlog_inc(sch, skb); cb = netem_skb_cb(skb); - if (q->gap == 0 || /* not doing reordering */ - q->counter < q->gap - 1 || /* inside last reordering gap */ + if (q->bursting > 0) { + u64 now; + + now = ktime_get_ns(); + + cb->time_to_send = now - (now % q->bursting) + q->bursting; + + ++q->counter; + tfifo_enqueue(skb, sch); + } else if (q->gap == 0 || /* not doing reordering */ + q->counter < q->gap - 1 || /* inside last reordering gap */ q->reorder < get_crandom(&q->reorder_cor)) { u64 now; s64 delay; @@ -927,6 +937,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { [TCA_NETEM_ECN] = { .type = NLA_U32 }, [TCA_NETEM_RATE64] = { .type = NLA_U64 }, [TCA_NETEM_LATENCY64] = { .type = NLA_S64 }, + [TCA_NETEM_BURSTING] = { .type = NLA_U64 }, [TCA_NETEM_JITTER64] = { .type = NLA_S64 }, [TCA_NETEM_SLOT] = { .len = sizeof(struct tc_netem_slot) }, }; @@ -1001,6 +1012,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, q->latency = PSCHED_TICKS2NS(qopt->latency); q->jitter = PSCHED_TICKS2NS(qopt->jitter); + q->bursting = PSCHED_TICKS2NS(qopt->bursting); q->limit = qopt->limit; q->gap = qopt->gap; q->counter = 0; @@ -1032,6 +1044,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, if (tb[TCA_NETEM_LATENCY64]) q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]); + if (tb[TCA_NETEM_BURSTING]) + q->bursting = nla_get_u64(tb[TCA_NETEM_BURSTING]); + if (tb[TCA_NETEM_JITTER64]) q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]); @@ -1150,6 +1165,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) UINT_MAX); qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter), UINT_MAX); + qopt.bursting = 
min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->bursting), + UINT_MAX); + qopt.limit = q->limit; qopt.loss = q->loss; qopt.gap = q->gap; -- 2.25.1 > On 27 Jun 2021, at 20:32, Cong Wang <xiyou.wangcong@gmail.com> wrote: > > On Fri, Jun 25, 2021 at 5:03 AM Niclas Hedam <nhed@itu.dk> wrote: >> diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h >> index 79a699f106b1..826d1dee6601 100644 >> --- a/include/uapi/linux/pkt_sched.h >> +++ b/include/uapi/linux/pkt_sched.h >> @@ -594,6 +594,7 @@ enum { >> TCA_NETEM_DELAY_DIST, >> TCA_NETEM_REORDER, >> TCA_NETEM_CORRUPT, >> + TCA_NETEM_BURSTING, >> TCA_NETEM_LOSS, >> TCA_NETEM_RATE, >> TCA_NETEM_ECN, > > You can't add a new enum in the middle, as it is UAPI. > > Thanks.
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 79a699f106b1..826d1dee6601 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -594,6 +594,7 @@ enum { TCA_NETEM_DELAY_DIST, TCA_NETEM_REORDER, TCA_NETEM_CORRUPT, + TCA_NETEM_BURSTING, TCA_NETEM_LOSS, TCA_NETEM_RATE, TCA_NETEM_ECN, @@ -615,6 +616,7 @@ struct tc_netem_qopt { __u32 gap; /* re-ordering gap (0 for none) */ __u32 duplicate; /* random packet dup (0=none ~0=100%) */ __u32 jitter; /* random jitter in latency (us) */ + __u32 bursting; /* send packets in bursts (us) */ }; struct tc_netem_corr { diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 0c345e43a09a..52d796287b86 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -85,6 +85,7 @@ struct netem_sched_data { s64 latency; s64 jitter; + u32 bursting; u32 loss; u32 ecn; u32 limit; @@ -467,7 +468,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* If a delay is expected, orphan the skb. 
(orphaning usually takes * place at TX completion time, so _before_ the link transit delay) */ - if (q->latency || q->jitter || q->rate) + if (q->latency || q->jitter || q->rate || q->bursting) skb_orphan_partial(skb); /* @@ -527,8 +528,17 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, qdisc_qstats_backlog_inc(sch, skb); cb = netem_skb_cb(skb); - if (q->gap == 0 || /* not doing reordering */ - q->counter < q->gap - 1 || /* inside last reordering gap */ + if (q->bursting > 0) { + u64 now; + + now = ktime_get_ns(); + + cb->time_to_send = now - (now % q->bursting) + q->bursting; + + ++q->counter; + tfifo_enqueue(skb, sch); + } else if (q->gap == 0 || /* not doing reordering */ + q->counter < q->gap - 1 || /* inside last reordering gap */ q->reorder < get_crandom(&q->reorder_cor)) { u64 now; s64 delay; @@ -927,6 +937,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { [TCA_NETEM_ECN] = { .type = NLA_U32 }, [TCA_NETEM_RATE64] = { .type = NLA_U64 }, [TCA_NETEM_LATENCY64] = { .type = NLA_S64 }, + [TCA_NETEM_BURSTING] = { .type = NLA_U64 }, [TCA_NETEM_JITTER64] = { .type = NLA_S64 }, [TCA_NETEM_SLOT] = { .len = sizeof(struct tc_netem_slot) }, }; @@ -1001,6 +1012,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, q->latency = PSCHED_TICKS2NS(qopt->latency); q->jitter = PSCHED_TICKS2NS(qopt->jitter); + q->bursting = PSCHED_TICKS2NS(qopt->bursting); q->limit = qopt->limit; q->gap = qopt->gap; q->counter = 0; @@ -1032,6 +1044,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, if (tb[TCA_NETEM_LATENCY64]) q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]); + if (tb[TCA_NETEM_BURSTING]) + q->bursting = nla_get_u64(tb[TCA_NETEM_BURSTING]); + if (tb[TCA_NETEM_JITTER64]) q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]); @@ -1150,6 +1165,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) UINT_MAX); qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter), UINT_MAX); + qopt.bursting = 
min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->bursting), + UINT_MAX); + qopt.limit = q->limit; qopt.loss = q->loss; qopt.gap = q->gap;
Hello, This patch implements packet bursting in the NetEm scheduler. This allows system administrators to hold back outgoing packets and release them at a multiple of a time quantum. This feature can be used to prevent timing attacks caused by network latency. I'm currently publishing a paper on this, which is not yet publicly available, but the idea is based on Predictive Black-Box Mitigation of Timing Channels (https://dl.acm.org/doi/pdf/10.1145/1866307.1866341). Signed-off-by: Niclas Hedam <niclas@hed.am> --- include/uapi/linux/pkt_sched.h | 2 ++ net/sched/sch_netem.c | 24 +++++++++++++++++++++--- 2 files changed, 23 insertions(+), 3 deletions(-) -- 2.25.1