@@ -978,6 +978,13 @@ show_k:
goto show_k;
}
+static __u32 u32_hash_fold(struct tc_u32_key *key)
+{
+ __u8 fshift = key->mask ? ffs(ntohl(key->mask)) - 1 : 0;
+
+ return ntohl(key->val & key->mask) >> fshift;
+}
+
static int u32_parse_opt(struct filter_util *qu, char *handle,
int argc, char **argv, struct nlmsghdr *n)
{
@@ -1110,9 +1117,7 @@ static int u32_parse_opt(struct filter_util *qu, char *handle,
}
NEXT_ARG();
}
- hash = sel2.keys[0].val & sel2.keys[0].mask;
- hash ^= hash >> 16;
- hash ^= hash >> 8;
+ hash = u32_hash_fold(&sel2.keys[0]);
htid = ((hash % divisor) << 12) | (htid & 0xFFF00000);
sample_ok = 1;
continue;
Between Linux kernels 2.4 and 2.6, key folding for hash tables changed in kernel space. When iproute2 dropped support for the older algorithm, the wrong code was removed and the 2.4 folding method remained in place. To make things work with recent kernels again, restoring the removed code alone was not sufficient: additional byteorder fixes were needed. While at it, make use of ffs() and thereby align the code with how the kernel determines the shift width.

Fixes: 267480f55383c ("Backout the 2.4 utsname hash patch.")
Signed-off-by: Phil Sutter <phil@nwl.cc>
---
Resending because the original post was archived in Patchwork.
---
 tc/f_u32.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
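
For illustration only, not part of the patch: a standalone sketch contrasting the restored ffs()-based fold (as in the kernel's cls_u32) with the 2.4-era XOR fold it replaces. The helper names and the sample mask/value below are made up for the demo; only u32_hash_fold() above reflects the actual patch.

#include <stdio.h>
#include <stdint.h>
#include <strings.h>    /* ffs() */
#include <arpa/inet.h>  /* ntohl(), htonl() */

/* 2.6+ fold: shift the masked value down so the lowest set mask
 * bit lands at bit 0, matching how the kernel picks the shift width. */
static uint32_t fold_ffs(uint32_t val_be, uint32_t mask_be)
{
	int fshift = mask_be ? ffs(ntohl(mask_be)) - 1 : 0;

	return ntohl(val_be & mask_be) >> fshift;
}

/* 2.4-era fold that iproute2 mistakenly kept: XOR-fold the masked
 * value down to a single byte, without any byteorder conversion. */
static uint32_t fold_xor(uint32_t val_be, uint32_t mask_be)
{
	uint32_t h = val_be & mask_be;

	h ^= h >> 16;
	h ^= h >> 8;
	return h;
}

int main(void)
{
	/* e.g. hash on the second byte of an IPv4 address */
	uint32_t mask = htonl(0x00ff0000);
	uint32_t val  = htonl(0x0a2a0001);	/* 10.42.0.1 */

	printf("ffs fold: 0x%x\n", fold_ffs(val, mask));  /* 0x2a on any host */
	printf("xor fold: 0x%x\n", fold_xor(val, mask));  /* host-endian dependent */
	return 0;
}

Note that fold_xor() operates on the value while it is still in network byte order, so its result differs between little- and big-endian hosts. That is why restoring the old fold alone was not enough and byteorder fixes were needed on top.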