[bpf-next,04/10] selftests/bpf: re-split main function into dedicated tests

Message ID 20241113-flow_dissector-v1-4-27c4df0592dc@bootlin.com
State Superseded
Series selftests/bpf: migrate test_flow_dissector.sh to test_progs

Commit Message

Alexis Lothoré Nov. 13, 2024, 1:53 p.m. UTC
The flow_dissector selftest runs plenty of tests over different kinds of
packets, grouped into three categories: skb mode, non-skb mode with direct
attach, and non-skb mode with indirect attach.

Re-split the main function into dedicated tests. Each test now has to do
its own setup/teardown, but in exchange the tests can be run separately.

Signed-off-by: Alexis Lothoré (eBPF Foundation) <alexis.lothore@bootlin.com>
---
 .../selftests/bpf/prog_tests/flow_dissector.c      | 92 ++++++++++++++--------
 1 file changed, 57 insertions(+), 35 deletions(-)
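
For readers skimming the cover text only, the resulting shape of each dedicated
test is shown below; this is a condensed rendering of the direct-attach case
taken from the diff further down (the serial_ prefix is slated to be dropped in
v2, see the comments):

	void serial_test_flow_dissector_skb_less_direct_attach(void)
	{
		int err, prog_fd, tap_fd;
		struct bpf_flow *skel;

		/* Per-test setup: skeleton, tail-call map, dissector attach, tap device */
		skel = bpf_flow__open_and_load();
		if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
			return;

		err = init_prog_array(skel->obj, skel->maps.jmp_table);
		if (!ASSERT_OK(err, "init_prog_array"))
			goto out_destroy_skel;

		prog_fd = bpf_program__fd(skel->progs._dissect);
		if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
			goto out_destroy_skel;

		err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
		if (!ASSERT_OK(err, "bpf_prog_attach"))
			goto out_destroy_skel;

		tap_fd = create_tap("tap0");
		if (!ASSERT_OK_FD(tap_fd, "create_tap"))
			goto out_destroy_skel;
		err = ifup("tap0");
		if (!ASSERT_OK(err, "ifup"))
			goto out_close_tap;

		/* Shared body, common to the three test categories */
		run_tests_skb_less(tap_fd, skel->maps.last_dissection);

		err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
		ASSERT_OK(err, "bpf_prog_detach2");

		/* Per-test teardown, unwinding the setup in reverse order */
	out_close_tap:
		close(tap_fd);
	out_destroy_skel:
		bpf_flow__destroy(skel);
	}

The indirect-attach variant follows the same structure, with
bpf_program__attach_netns() used instead of bpf_prog_attach(), as shown in the
diff below.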

Comments

Alexis Lothoré Nov. 14, 2024, 7:57 a.m. UTC | #1
Hello Stanislav, thanks for the reviews!

On 11/13/24 18:42, Stanislav Fomichev wrote:
> On 11/13, Alexis Lothoré (eBPF Foundation) wrote:
>> The flow_dissector selftest runs plenty of tests over different kinds of
>> packets, grouped into three categories: skb mode, non-skb mode with direct
>> attach, and non-skb mode with indirect attach.
>>
>> Re-split the main function into dedicated tests. Each test now has to do
>> its own setup/teardown, but in exchange the tests can be run separately.
>>
>> Signed-off-by: Alexis Lothoré (eBPF Foundation) <alexis.lothore@bootlin.com>
>> ---
>>  .../selftests/bpf/prog_tests/flow_dissector.c      | 92 ++++++++++++++--------
>>  1 file changed, 57 insertions(+), 35 deletions(-)
>>
>> diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
>> index 6fbe8b6dad561aec02db552caea02517ac1e2109..c5dfff333fe31dd55ac152fe9b107828227c8177 100644
>> --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
>> +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
>> @@ -549,63 +549,101 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
>>  	}
>>  }
>>  
>> -static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
> 
> [..]
> 
>> +void serial_test_flow_dissector_skb_less_direct_attach(void)
> 
> Any specific reason you keep these as serial? Seems like one of the benefits
> of splitting them up is to be able to run them in parallel?

I guess there is no reason (I added those while investigating the namespace
exclusivity test issue, and forgot to remove them). I'll remove the serial prefix.

By the way, I realize that each of those new tests could likely benefit from
running in an isolated net namespace (especially if they can run in
parallel); I'll add that too in v2.

Thanks,

Alexis
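
A minimal sketch of the isolated-netns idea mentioned above, assuming plain
unshare()/setns() rather than the netns helpers already shipped with the
selftests; the helper names are illustrative only and are not part of this
series:

	/* Illustrative only, not part of this series: give the calling test its
	 * own network namespace so that per-test "tap0" devices cannot clash
	 * when tests run in parallel. Assumes the test runs with CAP_SYS_ADMIN,
	 * which BPF selftests already require.
	 */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sched.h>
	#include <unistd.h>

	/* Enter a fresh netns; returns an fd for the original netns, or -1. */
	static int enter_private_netns(void)
	{
		int orig_fd = open("/proc/self/ns/net", O_RDONLY);

		if (orig_fd < 0)
			return -1;
		if (unshare(CLONE_NEWNET)) {
			close(orig_fd);
			return -1;
		}
		return orig_fd;
	}

	/* Switch back to the netns saved by enter_private_netns(). */
	static void leave_private_netns(int orig_fd)
	{
		setns(orig_fd, CLONE_NEWNET);
		close(orig_fd);
	}

Each test would then call enter_private_netns() before create_tap("tap0") and
leave_private_netns() at the end of its teardown path.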

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index 6fbe8b6dad561aec02db552caea02517ac1e2109..c5dfff333fe31dd55ac152fe9b107828227c8177 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -549,63 +549,101 @@  static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
 	}
 }
 
-static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
+void serial_test_flow_dissector_skb_less_direct_attach(void)
 {
-	int err, prog_fd;
+	int err, prog_fd, tap_fd;
+	struct bpf_flow *skel;
+
+	skel = bpf_flow__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+		return;
+
+	err = init_prog_array(skel->obj, skel->maps.jmp_table);
+	if (!ASSERT_OK(err, "init_prog_array"))
+		goto out_destroy_skel;
 
 	prog_fd = bpf_program__fd(skel->progs._dissect);
 	if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
-		return;
+		goto out_destroy_skel;
 
 	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
 	if (!ASSERT_OK(err, "bpf_prog_attach"))
-		return;
+		goto out_destroy_skel;
+
+	tap_fd = create_tap("tap0");
+	if (!ASSERT_OK_FD(tap_fd, "create_tap"))
+		goto out_destroy_skel;
+	err = ifup("tap0");
+	if (!ASSERT_OK(err, "ifup"))
+		goto out_close_tap;
 
 	run_tests_skb_less(tap_fd, skel->maps.last_dissection);
 
 	err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
 	ASSERT_OK(err, "bpf_prog_detach2");
+
+out_close_tap:
+	close(tap_fd);
+out_destroy_skel:
+	bpf_flow__destroy(skel);
 }
 
-static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
+void serial_test_flow_dissector_skb_less_indirect_attach(void)
 {
+	int err, net_fd, tap_fd;
+	struct bpf_flow *skel;
 	struct bpf_link *link;
-	int err, net_fd;
+
+	skel = bpf_flow__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+		return;
 
 	net_fd = open("/proc/self/ns/net", O_RDONLY);
 	if (!ASSERT_OK_FD(net_fd, "open(/proc/self/ns/net"))
-		return;
+		goto out_destroy_skel;
+
+	err = init_prog_array(skel->obj, skel->maps.jmp_table);
+	if (!ASSERT_OK(err, "init_prog_array"))
+		goto out_destroy_skel;
+
+	tap_fd = create_tap("tap0");
+	if (!ASSERT_OK_FD(tap_fd, "create_tap"))
+		goto out_clean_ns;
+	err = ifup("tap0");
+	if (!ASSERT_OK(err, "ifup"))
+		goto out_clean_ns;
 
 	link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
 	if (!ASSERT_OK_PTR(link, "attach_netns"))
-		goto out_close;
+		goto out_clean_ns;
 
 	run_tests_skb_less(tap_fd, skel->maps.last_dissection);
 
 	err = bpf_link__destroy(link);
 	ASSERT_OK(err, "bpf_link__destroy");
-out_close:
+
+out_clean_ns:
 	close(net_fd);
+out_destroy_skel:
+	bpf_flow__destroy(skel);
 }
 
-void test_flow_dissector(void)
+void serial_test_flow_dissector_skb(void)
 {
-	int i, err, prog_fd, keys_fd = -1, tap_fd;
 	struct bpf_flow *skel;
+	int i, err, prog_fd;
 
 	skel = bpf_flow__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
 		return;
 
-	prog_fd = bpf_program__fd(skel->progs._dissect);
-	if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
-		return;
-	keys_fd = bpf_map__fd(skel->maps.last_dissection);
-	if (!ASSERT_OK_FD(keys_fd, "bpf_map__fd"))
-		return;
 	err = init_prog_array(skel->obj, skel->maps.jmp_table);
 	if (!ASSERT_OK(err, "init_prog_array"))
-		return;
+		goto out_destroy_skel;
+
+	prog_fd = bpf_program__fd(skel->progs._dissect);
+	if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
+		goto out_destroy_skel;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		struct bpf_flow_keys flow_keys;
@@ -635,24 +673,8 @@  void test_flow_dissector(void)
 			     sizeof(struct bpf_flow_keys),
 			     "returned flow keys");
 	}
-	/* Do the same tests but for skb-less flow dissector.
-	 * We use a known path in the net/tun driver that calls
-	 * eth_get_headlen and we manually export bpf_flow_keys
-	 * via BPF map in this case.
-	 */
-	tap_fd = create_tap("tap0");
-	if (!ASSERT_OK_FD(tap_fd, "create_tap"))
-		goto out_destroy_skel;
-	err = ifup("tap0");
-	if (!ASSERT_OK(err, "ifup"))
-		goto out_destroy_skel;
 
-	/* Test direct prog attachment */
-	test_skb_less_prog_attach(skel, tap_fd);
-	/* Test indirect prog attachment via link */
-	test_skb_less_link_create(skel, tap_fd);
-
-	close(tap_fd);
 out_destroy_skel:
 	bpf_flow__destroy(skel);
 }
+