[feature] ASIC-focused multicast replication and dendrite API #14


Merged: 28 commits, merged Jul 16, 2025
Commits (28)
df62508
[feature] ASIC-focused 1st draft of multicast PRE
zeeshanlakhani Mar 4, 2025
4a945a8
[fix] change stage max
zeeshanlakhani Apr 15, 2025
bb876fa
update chaos tables, revert any p4 formatting for review
zeeshanlakhani Apr 16, 2025
2dcb98e
[minor] comments/align
zeeshanlakhani Apr 16, 2025
3ef78f0
Merge remote-tracking branch 'origin/multicast' into zl/p4-mcast-3
zeeshanlakhani Apr 21, 2025
37498e4
[review] address first pass of review
zeeshanlakhani Apr 21, 2025
d4953df
[review+] meta -> metadata, fix test issues
zeeshanlakhani Apr 21, 2025
dbe684f
[stages] test back to 14
zeeshanlakhani Apr 21, 2025
5537794
..
zeeshanlakhani Apr 22, 2025
1e29a70
[review] validation and underlying api transactions
zeeshanlakhani Apr 23, 2025
8a9fd45
[major changes] Rework groups+dataplane to handle external/underlay/b…
zeeshanlakhani Apr 23, 2025
105a296
Merge remote-tracking branch 'origin/multicast' into zl/p4-mcast
zeeshanlakhani May 22, 2025
1c0e660
[minor] chaos and remove pub
zeeshanlakhani May 22, 2025
57804d1
[minor] openapi update
zeeshanlakhani May 22, 2025
e55ddbc
[update] link-local hop limit handling + counter categories
zeeshanlakhani Jun 3, 2025
09fcec0
Change API into internal/replication vs external
zeeshanlakhani Jun 19, 2025
e3dda26
Merge remote-tracking branch 'origin/multicast' into zl/p4-mcast
zeeshanlakhani Jun 21, 2025
da0c3b3
..
zeeshanlakhani Jun 22, 2025
7158e91
..
zeeshanlakhani Jun 23, 2025
5a26a07
[review] updates on lock handling, ipv6 use, more
zeeshanlakhani Jun 29, 2025
5c096e9
..
zeeshanlakhani Jun 30, 2025
8167280
..
zeeshanlakhani Jun 30, 2025
91cd396
..
zeeshanlakhani Jul 1, 2025
4d6d726
..
zeeshanlakhani Jul 1, 2025
21d9274
[review] updates: locking cleanup and scoped free id(s)
zeeshanlakhani Jul 3, 2025
6f4082e
[review] keep mcast group lock for entire action
zeeshanlakhani Jul 4, 2025
9660726
[review] last bits++
zeeshanlakhani Jul 10, 2025
a3a8abb
minor: squiggly
zeeshanlakhani Jul 10, 2025
136 changes: 128 additions & 8 deletions dpd-client/tests/integration_tests/mcast.rs
@@ -599,7 +599,6 @@ async fn test_internal_ipv6_validation() {
assert_eq!(updated.tag, Some("updated_tag".to_string()));
assert_eq!(updated.ext_fwding.vlan_id, None);

// Cleanup
cleanup_test_group(switch, created.group_ip).await;
}

@@ -685,7 +684,6 @@ async fn test_vlan_propagation_to_internal() {
"Admin-scoped group bitmap should have VLAN 42 from external group"
);

// Cleanup
cleanup_test_group(switch, created_admin.group_ip).await;
cleanup_test_group(switch, created_external.group_ip).await;
}
@@ -695,7 +693,6 @@ async fn test_vlan_propagation_to_internal() {
async fn test_group_api_lifecycle() {
let switch = &*get_switch().await;

// Create admin-scoped IPv6 group for underlay replication infrastructure
let egress1 = PhysPort(28);
let internal_multicast_ip = IpAddr::V6(MULTICAST_NAT_IP);
let underlay_group = create_test_multicast_group(
@@ -1330,7 +1327,6 @@ async fn test_api_invalid_combinations() {
),
}

// Cleanup
cleanup_test_group(switch, created_ipv4.group_ip).await;
cleanup_test_group(switch, created_non_admin.group_ip).await;
cleanup_test_group(switch, internal_multicast_ip).await;
@@ -2024,7 +2020,6 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_external_members(
.await
.unwrap();

// Run the test
let result = switch.packet_test(vec![test_pkt], expected_pkts);

check_counter_incremented(
@@ -2163,7 +2158,6 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_members(
.await
.unwrap();

// Run the test
let result = switch.packet_test(vec![test_pkt], expected_pkts);

check_counter_incremented(
@@ -2320,7 +2314,6 @@ async fn test_encapped_multicast_geneve_mcast_tag_to_underlay_and_external_membe
.await
.unwrap();

// Run the test
let result = switch.packet_test(vec![test_pkt], expected_pkts);

check_counter_incremented(
@@ -4335,7 +4328,6 @@ async fn test_external_group_nat_target_validation() {
"External group should have no members"
);

// Cleanup
cleanup_test_group(switch, created_admin.group_ip).await;
cleanup_test_group(switch, created_external.group_ip).await;
}
@@ -4510,3 +4502,131 @@ async fn test_ipv6_multicast_scope_validation() {
.await
.ok();
}

#[tokio::test]
#[ignore]
async fn test_multicast_group_id_recycling() {
let switch = &*get_switch().await;

// Use admin-scoped IPv6 addresses that get group IDs assigned
let group1_ip = IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 10));
let group2_ip = IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 11));
let group3_ip = IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 12));

// Create first group and capture its group IDs
let group1 = create_test_multicast_group(
switch,
group1_ip,
Some("test_recycling_1"),
&[(PhysPort(11), types::Direction::External)],
None,
false,
None,
)
.await;

let group1_external_id = group1.external_group_id;
assert!(group1_external_id.is_some());

// Create second group and capture its group IDs
let group2 = create_test_multicast_group(
switch,
group2_ip,
Some("test_recycling_2"),
&[(PhysPort(12), types::Direction::External)],
None,
false,
None,
)
.await;

let group2_external_id = group2.external_group_id;
assert!(group2_external_id.is_some());
assert_ne!(group1_external_id, group2_external_id);

// Delete the first group
switch
.client
.multicast_group_delete(&group1_ip)
.await
.expect("Should be able to delete first group");

// Verify group1 was actually deleted
let groups_after_delete1 = switch
.client
.multicast_groups_list_stream(None)
.try_collect::<Vec<_>>()
.await
.expect("Should be able to list groups");
assert!(
!groups_after_delete1.iter().any(|g| g.group_ip == group1_ip),
"Group1 should be deleted"
);

// Create third group - should reuse the first group's ID
let group3 = create_test_multicast_group(
switch,
group3_ip,
Some("test_recycling_3"),
&[(PhysPort(13), types::Direction::External)],
None,
false,
None,
)
.await;

let group3_external_id = group3.external_group_id;
assert!(group3_external_id.is_some());

// Verify that ID recycling is working - group3 should get an ID that was
// previously used
assert_ne!(
group2_external_id, group3_external_id,
"Third group should get a different ID than the active second group"
);

// Create a fourth group after deleting group2, it should reuse group2's ID
switch
.client
.multicast_group_delete(&group2_ip)
.await
.expect("Should be able to delete second group");

// Verify group2 was actually deleted
let groups_after_delete2 = switch
.client
.multicast_groups_list_stream(None)
.try_collect::<Vec<_>>()
.await
.expect("Should be able to list groups");
assert!(
!groups_after_delete2.iter().any(|g| g.group_ip == group2_ip),
"Group2 should be deleted"
);

let group4_ip = IpAddr::V6(Ipv6Addr::new(0xff05, 0, 0, 0, 0, 0, 0, 13));
let group4 = create_test_multicast_group(
switch,
group4_ip,
Some("test_recycling_4"),
&[(PhysPort(14), types::Direction::External)],
None,
false,
None,
)
.await;

let group4_external_id = group4.external_group_id;
assert!(group4_external_id.is_some());

// Group4 should reuse group2's recently freed ID due to stack-like
// allocation
assert_eq!(
group2_external_id, group4_external_id,
"Fourth group should reuse second group's recycled ID"
);

// Cleanup - clean up remaining active groups
cleanup_test_group(switch, group3_ip).await;
cleanup_test_group(switch, group4_ip).await;
}
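
The recycling assertions above (group3 reusing group1's freed ID, group4 reusing group2's) only hold if freed group IDs are handed back out most-recently-freed first. The following is a minimal sketch of such a stack-backed allocator, assuming a simple LIFO free list; the GroupIdAllocator name, the u16 ID width, and the standalone main driver are illustrative only and are not dendrite's actual types.

/// Illustrative stack-like group ID allocator (hypothetical; not dendrite's
/// real implementation). Freed IDs are pushed onto a Vec and popped off
/// most-recently-freed first, which matches the reuse order the test above
/// asserts.
struct GroupIdAllocator {
    next_id: u16,       // next never-allocated ID
    free_ids: Vec<u16>, // LIFO stack of recycled IDs
}

impl GroupIdAllocator {
    fn new(first_id: u16) -> Self {
        Self { next_id: first_id, free_ids: Vec::new() }
    }

    // Prefer a recycled ID over minting a fresh one.
    fn allocate(&mut self) -> u16 {
        match self.free_ids.pop() {
            Some(id) => id,
            None => {
                let id = self.next_id;
                self.next_id += 1;
                id
            }
        }
    }

    // Return a deleted group's ID to the free stack.
    fn free(&mut self, id: u16) {
        self.free_ids.push(id);
    }
}

fn main() {
    let mut ids = GroupIdAllocator::new(1);
    let g1 = ids.allocate(); // 1
    let g2 = ids.allocate(); // 2
    ids.free(g1);            // delete group1
    let g3 = ids.allocate(); // reuses 1 (mirrors group3 in the test)
    ids.free(g2);            // delete group2
    let g4 = ids.allocate(); // reuses 2 (mirrors group4 in the test)
    assert_ne!(g2, g3);
    assert_eq!(g2, g4);
    println!("allocated: {g1} {g2} {g3} {g4}");
}

Under this kind of scheme, reuse of a recently freed ID keeps the replication table indices dense, but a stale reference to a deleted group could point at its successor, which is one reason the test verifies each deletion completed before asserting reuse.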