
Commit c9224db

[review] updates: locking cleanup and scoped free id(s)
1 parent: 4d6d726

2 files changed: +854 / -366 lines

dpd-client/tests/integration_tests/mcast.rs

Lines changed: 103 additions & 8 deletions
@@ -599,7 +599,6 @@ async fn test_internal_ipv6_validation() {
     assert_eq!(updated.tag, Some("updated_tag".to_string()));
     assert_eq!(updated.ext_fwding.vlan_id, None);
 
-    // Cleanup
     cleanup_test_group(switch, created.group_ip).await;
 }
 
@@ -685,7 +684,6 @@ async fn test_vlan_propagation_to_internal() {
         "Admin-scoped group bitmap should have VLAN 42 from external group"
     );
 
-    // Cleanup
     cleanup_test_group(switch, created_admin.group_ip).await;
     cleanup_test_group(switch, created_external.group_ip).await;
 }
@@ -695,7 +693,6 @@ async fn test_vlan_propagation_to_internal() {
 async fn test_group_api_lifecycle() {
     let switch = &*get_switch().await;
 
-    // Create admin-scoped IPv6 group for underlay replication infrastructure
     let egress1 = PhysPort(28);
     let internal_multicast_ip = IpAddr::V6(MULTICAST_NAT_IP);
     let underlay_group = create_test_multicast_group(
@@ -1330,7 +1327,6 @@ async fn test_api_invalid_combinations() {
         ),
     }
 
-    // Cleanup
     cleanup_test_group(switch, created_ipv4.group_ip).await;
     cleanup_test_group(switch, created_non_admin.group_ip).await;
     cleanup_test_group(switch, internal_multicast_ip).await;
@@ -2024,7 +2020,6 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_external_members(
         .await
         .unwrap();
 
-    // Run the test
     let result = switch.packet_test(vec![test_pkt], expected_pkts);
 
     check_counter_incremented(
@@ -2163,7 +2158,6 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_members(
         .await
         .unwrap();
 
-    // Run the test
     let result = switch.packet_test(vec![test_pkt], expected_pkts);
 
     check_counter_incremented(
@@ -2320,7 +2314,6 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_and_external_membe
         .await
         .unwrap();
 
-    // Run the test
     let result = switch.packet_test(vec![test_pkt], expected_pkts);
 
     check_counter_incremented(
@@ -4335,7 +4328,6 @@ async fn test_external_group_nat_target_validation() {
         "External group should have no members"
     );
 
-    // Cleanup
     cleanup_test_group(switch, created_admin.group_ip).await;
     cleanup_test_group(switch, created_external.group_ip).await;
 }
@@ -4510,3 +4502,106 @@ async fn test_ipv6_multicast_scope_validation() {
         .await
         .ok();
 }
+
+#[tokio::test]
+#[ignore]
+async fn test_multicast_group_id_recycling() {
+    let switch = &*get_switch().await;
+
+    // Use admin-scoped IPv6 addresses that get group IDs assigned
+    let group1_ip = IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 10));
+    let group2_ip = IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 11));
+    let group3_ip = IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 12));
+
+    // Create first group and capture its group IDs
+    let group1 = create_test_multicast_group(
+        switch,
+        group1_ip,
+        Some("test_recycling_1"),
+        &[(PhysPort(11), types::Direction::External)],
+        None,
+        false,
+        None,
+    )
+    .await;
+
+    let group1_external_id = group1.external_group_id;
+    assert!(group1_external_id.is_some());
+
+    // Create second group and capture its group IDs
+    let group2 = create_test_multicast_group(
+        switch,
+        group2_ip,
+        Some("test_recycling_2"),
+        &[(PhysPort(12), types::Direction::External)],
+        None,
+        false,
+        None,
+    )
+    .await;
+
+    let group2_external_id = group2.external_group_id;
+    assert!(group2_external_id.is_some());
+    assert_ne!(group1_external_id, group2_external_id);
+
+    // Delete the first group
+    switch
+        .client
+        .multicast_group_delete(&group1_ip)
+        .await
+        .expect("Should be able to delete first group");
+
+    // Create third group - should reuse the first group's ID
+    let group3 = create_test_multicast_group(
+        switch,
+        group3_ip,
+        Some("test_recycling_3"),
+        &[(PhysPort(13), types::Direction::External)],
+        None,
+        false,
+        None,
+    )
+    .await;
+
+    let group3_external_id = group3.external_group_id;
+    assert!(group3_external_id.is_some());
+
+    // Verify that ID recycling is working - group3 should get an ID that was previously used
+    // The exact ID depends on allocation strategy, but it should be different from group2
+    assert_ne!(
+        group2_external_id, group3_external_id,
+        "Third group should get a different ID than the active second group"
+    );
+
+    // Create a fourth group after deleting group2, it should reuse group2's ID
+    switch
+        .client
+        .multicast_group_delete(&group2_ip)
+        .await
+        .expect("Should be able to delete second group");
+
+    let group4_ip = IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 13));
+    let group4 = create_test_multicast_group(
+        switch,
+        group4_ip,
+        Some("test_recycling_4"),
+        &[(PhysPort(14), types::Direction::External)],
+        None,
+        false,
+        None,
+    )
+    .await;
+
+    let group4_external_id = group4.external_group_id;
+    assert!(group4_external_id.is_some());
+
+    // Group4 should reuse group2's recently freed ID due to stack-like allocation
+    assert_eq!(
+        group2_external_id, group4_external_id,
+        "Fourth group should reuse second group's recycled ID"
+    );
+
+    // Cleanup
+    cleanup_test_group(switch, group3_ip).await;
+    cleanup_test_group(switch, group4_ip).await;
+}
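The new test's final assertion relies on freed group IDs being handed back in LIFO order: the most recently released ID is the first one reallocated. Below is a minimal, self-contained sketch of such a stack-like free-ID pool; the FreeIdPool type, its method names (alloc, release), and the u16 ID width are assumptions made for illustration, not the actual dpd allocator.

// Illustrative only: a stack-like (LIFO) free-ID pool, assuming u16 group IDs.
// `FreeIdPool`, `alloc`, and `release` are hypothetical names for this sketch.
struct FreeIdPool {
    next_fresh: u16, // next never-allocated ID
    freed: Vec<u16>, // released IDs, reused most-recent-first
}

impl FreeIdPool {
    fn new(first_id: u16) -> Self {
        Self { next_fresh: first_id, freed: Vec::new() }
    }

    fn alloc(&mut self) -> u16 {
        // Prefer the most recently freed ID; otherwise mint a fresh one.
        if let Some(id) = self.freed.pop() {
            id
        } else {
            let id = self.next_fresh;
            self.next_fresh += 1;
            id
        }
    }

    fn release(&mut self, id: u16) {
        self.freed.push(id);
    }
}

fn main() {
    let mut pool = FreeIdPool::new(100);
    let id1 = pool.alloc(); // 100 (fresh)
    let id2 = pool.alloc(); // 101 (fresh)
    pool.release(id1);
    let id3 = pool.alloc(); // reuses 100: like group3 after group1 is deleted
    pool.release(id2);
    let id4 = pool.alloc(); // reuses 101: like the group4/group2 assert_eq!
    assert_ne!(id2, id3);
    assert_eq!(id2, id4);
    println!("allocated: {id1}, {id2}, {id3}, {id4}");
}

With LIFO reuse, the fourth allocation deterministically picks up the second group's freed ID, which is exactly what the test's closing assert_eq! checks.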
