Skip to content

Commit b09c63c

Browse files
..
1 parent f1b3127 commit b09c63c

File tree

1 file changed

+74
-254
lines changed
  • dpd-client/tests/integration_tests

1 file changed

+74
-254
lines changed

dpd-client/tests/integration_tests/mcast.rs

Lines changed: 74 additions & 254 deletions
Original file line numberDiff line numberDiff line change
@@ -3837,260 +3837,6 @@ async fn test_multicast_reset_all_tables() -> TestResult {
38373837
Ok(())
38383838
}
38393839

3840-
#[tokio::test]
3841-
#[ignore]
3842-
async fn test_multicast_group_id_recycling() -> TestResult {
3843-
let switch = &*get_switch().await;
3844-
3845-
let (port_id, link_id) = switch.link_id(PhysPort(28)).unwrap();
3846-
3847-
let group_ip1: Ipv6Addr = "ff04::100".parse().unwrap();
3848-
let internal_create1 = types::MulticastGroupCreateEntry {
3849-
group_ip: group_ip1,
3850-
tag: Some("recycling_test_1".to_string()),
3851-
sources: None,
3852-
members: vec![types::MulticastGroupMember {
3853-
port_id,
3854-
link_id,
3855-
direction: types::Direction::External,
3856-
}],
3857-
};
3858-
3859-
let created_group1 = switch
3860-
.client
3861-
.multicast_group_create(&internal_create1)
3862-
.await?
3863-
.into_inner();
3864-
3865-
let first_external_id = created_group1.external_group_id;
3866-
let first_underlay_id = created_group1.underlay_group_id;
3867-
3868-
switch.client.multicast_group_delete(&group_ip1.into()).await?;
3869-
3870-
let group_ip2: Ipv6Addr = "ff04::101".parse().unwrap();
3871-
let internal_create2 = types::MulticastGroupCreateEntry {
3872-
group_ip: group_ip2,
3873-
tag: Some("recycling_test_2".to_string()),
3874-
sources: None,
3875-
members: vec![types::MulticastGroupMember {
3876-
port_id,
3877-
link_id,
3878-
direction: types::Direction::External,
3879-
}],
3880-
};
3881-
3882-
let created_group2 = switch
3883-
.client
3884-
.multicast_group_create(&internal_create2)
3885-
.await?
3886-
.into_inner();
3887-
3888-
assert!(
3889-
created_group2.external_group_id.is_some(),
3890-
"Second group should have external group ID"
3891-
);
3892-
assert!(
3893-
created_group2.underlay_group_id.is_none(),
3894-
"Second group should not have underlay group ID"
3895-
);
3896-
3897-
let group_ip3: Ipv6Addr = "ff04::102".parse().unwrap();
3898-
let internal_create3 = types::MulticastGroupCreateEntry {
3899-
group_ip: group_ip3,
3900-
tag: Some("recycling_test_3".to_string()),
3901-
sources: None,
3902-
members: vec![types::MulticastGroupMember {
3903-
port_id,
3904-
link_id,
3905-
direction: types::Direction::External,
3906-
}],
3907-
};
3908-
3909-
let created_group3 = switch
3910-
.client
3911-
.multicast_group_create(&internal_create3)
3912-
.await?
3913-
.into_inner();
3914-
3915-
assert!(
3916-
created_group3.external_group_id.is_some(),
3917-
"Third group should have external group ID"
3918-
);
3919-
assert!(
3920-
created_group3.underlay_group_id.is_none(),
3921-
"Third group should not have underlay group ID"
3922-
);
3923-
3924-
cleanup_test_group(switch, group_ip2.into()).await;
3925-
cleanup_test_group(switch, group_ip3.into()).await;
3926-
3927-
Ok(())
3928-
}
3929-
3930-
#[tokio::test]
#[ignore]
/// Verifies that external multicast group IDs freed by deletion are returned
/// to the allocator and handed out again to subsequently created groups.
///
/// Strategy: create and immediately delete five groups (ff04::200..204),
/// recording each assigned external ID, then create one more group and
/// assert its external ID is one of the previously freed ones.
async fn test_multicast_group_id_reuse() -> TestResult {
    let switch = &*get_switch().await;

    // Single external member on physical port 29, shared by every group.
    let (port_id, link_id) = switch.link_id(PhysPort(29)).unwrap();

    // External IDs observed before deletion; the reuse assertion checks
    // membership in this set.
    let mut deleted_external_ids = Vec::new();

    for i in 0..5 {
        // NOTE(review): 200 + i is formatted as decimal into a hex IPv6
        // group — addresses are ff04::200..ff04::204, which is intentional
        // here (only uniqueness matters).
        let group_ip: Ipv6Addr = format!("ff04::{}", 200 + i).parse().unwrap();
        let internal_create = types::MulticastGroupCreateEntry {
            group_ip,
            tag: Some(format!("id_reuse_test_{}", i)),
            sources: None,
            members: vec![types::MulticastGroupMember {
                port_id,
                link_id,
                direction: types::Direction::External,
            }],
        };

        let created_group = switch
            .client
            .multicast_group_create(&internal_create)
            .await?
            .into_inner();

        // Record the ID, then delete the group so the ID is freed.
        if let Some(external_id) = created_group.external_group_id {
            deleted_external_ids.push(external_id);
        }

        switch.client.multicast_group_delete(&group_ip.into()).await?;
    }

    // With five IDs freed, a fresh group should draw from the freed pool.
    let new_group_ip: Ipv6Addr = "ff04::300".parse().unwrap();
    let new_internal_create = types::MulticastGroupCreateEntry {
        group_ip: new_group_ip,
        tag: Some("id_reuse_verification".to_string()),
        sources: None,
        members: vec![types::MulticastGroupMember {
            port_id,
            link_id,
            direction: types::Direction::External,
        }],
    };

    let new_created_group = switch
        .client
        .multicast_group_create(&new_internal_create)
        .await?
        .into_inner();

    if let Some(new_external_id) = new_created_group.external_group_id {
        // Any of the five freed IDs is acceptable — the allocator's pick
        // order is not part of the contract.
        assert!(
            deleted_external_ids.contains(&new_external_id),
            "New group should reuse a deleted external group ID, got {}, expected one of {:?}",
            new_external_id,
            deleted_external_ids
        );
    } else {
        panic!("New group should have an external group ID");
    }

    cleanup_test_group(switch, new_group_ip.into()).await;

    Ok(())
}
3998-
3999-
#[tokio::test]
#[ignore]
/// Exercises the group-ID pool under churn: allocate a batch of groups,
/// free half of them, then allocate a second batch and verify the new
/// groups are created successfully with external (and no underlay) IDs.
///
/// Despite the name, this does not drive the pool to true exhaustion —
/// it creates only 10 groups; the point is that deletion returns capacity
/// that subsequent creations can consume.
async fn test_multicast_group_id_pool_exhaustion_recovery() -> TestResult {
    let switch = &*get_switch().await;

    // Single external member on physical port 30, shared by every group.
    let (port_id, link_id) = switch.link_id(PhysPort(30)).unwrap();

    let mut created_groups = Vec::new();
    let num_groups = 10;

    // Phase 1: create the initial batch at ff04::400..ff04::409.
    for i in 0..num_groups {
        let group_ip: Ipv6Addr = format!("ff04::{}", 400 + i).parse().unwrap();
        let internal_create = types::MulticastGroupCreateEntry {
            group_ip,
            tag: Some(format!("pool_test_{}", i)),
            sources: None,
            members: vec![types::MulticastGroupMember {
                port_id,
                link_id,
                direction: types::Direction::External,
            }],
        };

        let created_group = switch
            .client
            .multicast_group_create(&internal_create)
            .await?
            .into_inner();

        // Keep (ip, response) so later phases can delete/clean up by IP.
        created_groups.push((group_ip, created_group));
    }

    assert_eq!(
        created_groups.len(),
        num_groups,
        "Should have created {} groups",
        num_groups
    );

    // Phase 2: free the first half of the batch to return IDs to the pool.
    let groups_to_delete = &created_groups[0..num_groups / 2];
    for (group_ip, _) in groups_to_delete {
        switch.client.multicast_group_delete(&(*group_ip).into()).await?;
    }

    // Phase 3: allocate a second batch (ff04::500..) — these should
    // succeed using the capacity freed above.
    let mut new_groups = Vec::new();
    for i in 0..num_groups / 2 {
        let group_ip: Ipv6Addr = format!("ff04::{}", 500 + i).parse().unwrap();
        let internal_create = types::MulticastGroupCreateEntry {
            group_ip,
            tag: Some(format!("pool_recovery_test_{}", i)),
            sources: None,
            members: vec![types::MulticastGroupMember {
                port_id,
                link_id,
                direction: types::Direction::External,
            }],
        };

        let created_group = switch
            .client
            .multicast_group_create(&internal_create)
            .await?
            .into_inner();

        new_groups.push((group_ip, created_group));
    }

    assert_eq!(
        new_groups.len(),
        num_groups / 2,
        "Should have created {} new groups after pool recovery",
        num_groups / 2
    );

    // Every recovered group is external-only: external ID present,
    // underlay ID absent.
    for (_, group) in &new_groups {
        assert!(
            group.external_group_id.is_some(),
            "New group should have external group ID"
        );
        assert!(
            group.underlay_group_id.is_none(),
            "New group should not have underlay group ID"
        );
    }

    // Cleanup: the undeleted second half of the original batch plus all
    // recovery-phase groups (the first half was deleted in phase 2).
    for (group_ip, _) in &created_groups[num_groups / 2..] {
        cleanup_test_group(switch, (*group_ip).into()).await;
    }
    for (group_ip, _) in &new_groups {
        cleanup_test_group(switch, (*group_ip).into()).await;
    }

    Ok(())
}
4093-
40943840
#[tokio::test]
40953841
#[ignore]
40963842
async fn test_multicast_vlan_translation_not_possible() -> TestResult {
@@ -4756,3 +4502,77 @@ async fn test_ipv6_multicast_scope_validation() {
47564502
.await
47574503
.ok();
47584504
}
4505+
4506+
#[tokio::test]
#[ignore]
/// Verifies that a deleted group's external group ID is recycled: after
/// creating groups 1 and 2 (distinct IDs), deleting group 1, and creating
/// group 3, group 3 must receive group 1's freed external ID.
///
/// NOTE(review): the equality assertion assumes the allocator hands back
/// the most recently freed ID first — confirm this matches the server's
/// ID-allocation policy.
async fn test_multicast_group_id_recycling() {
    let switch = &*get_switch().await;

    // Use admin-scoped IPv6 addresses that get group IDs assigned
    let group1_ip = IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 10));
    let group2_ip = IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 11));
    let group3_ip = IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 12));

    // Create first group and capture its group IDs
    let group1 = create_test_multicast_group(
        switch,
        group1_ip,
        Some("test_recycling_1"),
        &[(PhysPort(11), types::Direction::External)],
        None,
        false,
        None,
    )
    .await;

    let group1_external_id = group1.external_group_id;
    assert!(group1_external_id.is_some());

    // Create second group and capture its group IDs
    let group2 = create_test_multicast_group(
        switch,
        group2_ip,
        Some("test_recycling_2"),
        &[(PhysPort(12), types::Direction::External)],
        None,
        false,
        None,
    )
    .await;

    let group2_external_id = group2.external_group_id;
    assert!(group2_external_id.is_some());
    // Two live groups must never share an external ID.
    assert_ne!(group1_external_id, group2_external_id);

    // Delete the first group
    switch
        .client
        .multicast_group_delete(&group1_ip)
        .await
        .expect("Should be able to delete first group");

    // Create third group - should reuse the first group's ID
    let group3 = create_test_multicast_group(
        switch,
        group3_ip,
        Some("test_recycling_3"),
        &[(PhysPort(13), types::Direction::External)],
        None,
        false,
        None,
    )
    .await;

    let group3_external_id = group3.external_group_id;
    assert!(group3_external_id.is_some());

    // Verify that group3 reused group1's ID due to recycling
    assert_eq!(
        group1_external_id, group3_external_id,
        "Third group should reuse first group's recycled ID"
    );

    // Cleanup
    cleanup_test_group(switch, group2_ip).await;
    cleanup_test_group(switch, group3_ip).await;
}

0 commit comments

Comments
 (0)