Skip to content

Commit 768a5f6

Browse files
[review] updates: ..
1 parent 61749ee commit 768a5f6

File tree

1 file changed

+30
-34
lines changed

1 file changed

+30
-34
lines changed

dpd/src/mcast/mod.rs

Lines changed: 30 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -246,12 +246,13 @@ pub struct MulticastGroupData {
246246
impl MulticastGroupData {
247247
const GENERATOR_START: u16 = 100;
248248

249+
/// Creates a new instance of MulticastGroupData with pre-populated free
250+
/// group IDs.
249251
pub(crate) fn new() -> Self {
250252
// Pre-populate with all available IDs from GENERATOR_START to u16::MAX-1
251253
// Using a Vec as a stack for O(1) push/pop operations
252-
let free_group_ids = Arc::new(Mutex::new(
253-
(Self::GENERATOR_START..u16::MAX).collect::<Vec<u16>>(),
254-
));
254+
let free_group_ids =
255+
Arc::new(Mutex::new((Self::GENERATOR_START..u16::MAX).collect()));
255256

256257
Self {
257258
groups: BTreeMap::new(),
@@ -263,6 +264,7 @@ impl MulticastGroupData {
263264
/// Generates a unique multicast group ID with automatic cleanup on drop.
264265
///
265266
/// O(1) allocation from pre-populated free list. Never allocates.
267+
///
266268
/// IDs below GENERATOR_START (100) to avoid conflicts with reserved ranges.
267269
///
268270
/// Returns a ScopedGroupId that will automatically return the ID to the
@@ -402,17 +404,11 @@ pub(crate) fn add_group_external(
402404
let internal_ip = group_info.nat_target.internal_ip.into();
403405
debug!(
404406
s.log,
405-
"External group {} with VLAN {} references internal group {}",
407+
"External group {} with VLAN {} references internal group {}, propagating VLAN to existing internal group",
406408
group_ip,
407409
vlan_id,
408410
internal_ip
409411
);
410-
debug!(
411-
s.log,
412-
"Propagating VLAN {} to existing internal group {}",
413-
vlan_id,
414-
internal_ip
415-
);
416412

417413
let internal_group = mcast
418414
.groups
@@ -581,7 +577,7 @@ fn add_group_internal_only(
581577
/// Delete a multicast group from the switch, including all associated tables
582578
/// and port mappings.
583579
pub(crate) fn del_group(s: &Switch, group_ip: IpAddr) -> DpdResult<()> {
584-
let (group, group_ids_to_free, nat_target_to_remove) = {
580+
let (group, nat_target_to_remove) = {
585581
let mut mcast = s.mcast.lock().unwrap();
586582

587583
let group = mcast.groups.remove(&group_ip).ok_or_else(|| {
@@ -591,21 +587,12 @@ pub(crate) fn del_group(s: &Switch, group_ip: IpAddr) -> DpdResult<()> {
591587
))
592588
})?;
593589

594-
// Collect group IDs that need to be freed
595-
let mut group_ids_to_free = Vec::new();
596-
if let Some(external_id) = group.external_group_id {
597-
group_ids_to_free.push(external_id);
598-
}
599-
if let Some(underlay_id) = group.underlay_group_id {
600-
group_ids_to_free.push(underlay_id);
601-
}
602-
603590
let nat_target_to_remove = group
604591
.int_fwding
605592
.nat_target
606593
.map(|nat| nat.internal_ip.into());
607594

608-
(group, group_ids_to_free, nat_target_to_remove)
595+
(group, nat_target_to_remove)
609596
};
610597

611598
debug!(s.log, "deleting multicast group for IP {}", group_ip);
@@ -624,16 +611,6 @@ pub(crate) fn del_group(s: &Switch, group_ip: IpAddr) -> DpdResult<()> {
624611
if let Some(internal_ip) = nat_target_to_remove {
625612
mcast.remove_nat_target_ref(group_ip, internal_ip);
626613
}
627-
if !group_ids_to_free.is_empty() {
628-
if let Ok(mut pool) = mcast.free_group_ids.lock() {
629-
for group_id in group_ids_to_free {
630-
// Only add to free list if it's in valid range
631-
if group_id >= MulticastGroupData::GENERATOR_START {
632-
pool.push(group_id);
633-
}
634-
}
635-
}
636-
}
637614
}
638615

639616
Ok(())
@@ -1302,6 +1279,25 @@ fn delete_multicast_groups(
13021279
})?;
13031280
}
13041281

1282+
// Return group IDs to the free pool
1283+
let free_group_ids = {
1284+
let mcast = s.mcast.lock().unwrap();
1285+
mcast.free_group_ids.clone() // Cloning an Arc is cheap (refcount bump, not a deep copy)
1286+
};
1287+
1288+
if let Ok(mut pool) = free_group_ids.lock() {
1289+
if let Some(external_id) = external_group_id {
1290+
if external_id >= MulticastGroupData::GENERATOR_START {
1291+
pool.push(external_id);
1292+
}
1293+
}
1294+
if let Some(underlay_id) = underlay_group_id {
1295+
if underlay_id >= MulticastGroupData::GENERATOR_START {
1296+
pool.push(underlay_id);
1297+
}
1298+
}
1299+
}
1300+
13051301
Ok(())
13061302
}
13071303

@@ -2420,7 +2416,7 @@ mod tests {
24202416
#[test]
24212417
fn test_concurrent_id_allocation() {
24222418
let mcast_data = Arc::new(Mutex::new(MulticastGroupData::new()));
2423-
let mut handles = vec![];
2419+
let mut handles = Vec::new();
24242420

24252421
// Spawn multiple threads to allocate IDs concurrently
24262422
for _ in 0..10 {
@@ -2435,7 +2431,7 @@ mod tests {
24352431
}
24362432

24372433
// Collect all allocated IDs
2438-
let mut allocated_ids = vec![];
2434+
let mut allocated_ids = Vec::new();
24392435
for handle in handles {
24402436
allocated_ids.push(handle.join().unwrap());
24412437
}
@@ -2455,7 +2451,7 @@ mod tests {
24552451
#[test]
24562452
fn test_concurrent_allocation_and_deallocation() {
24572453
let mcast_data = Arc::new(Mutex::new(MulticastGroupData::new()));
2458-
let mut handles = vec![];
2454+
let mut handles = Vec::new();
24592455

24602456
// Spawn threads that allocate and immediately drop (deallocate)
24612457
for _ in 0..5 {

0 commit comments

Comments
 (0)