@@ -208,6 +208,27 @@ fn assert_enforce_nodes_cpu_capacity_post_condition(
     );
 }
 
+// ----------------------------------------------------
+// Phase 3
+// Place unassigned sources.
+//
+// We use a greedy algorithm as a simple heuristic here.
+//
+// We go through the sources in decreasing order of their load,
+// in two passes.
+//
+// In the first pass, we have a look at
+// the nodes with which there is an affinity.
+//
+// If one of them has room for all of the shards, then we assign all
+// of the shards to it.
+//
+// In the second pass, we just put as many shards as possible on the node
+// with the highest available capacity.
+//
+// If this algorithm fails to place all remaining shards, we inflate
+// the node capacities by 20% in the scheduling problem and start from the beginning.
+
 fn attempt_place_unassigned_shards(
     unassigned_shards: &[Source],
     problem: &SchedulingProblem,
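
The Phase 3 comment above states the heuristic only in prose. Below is a rough, self-contained sketch of the same idea, covering the two passes and the 20% inflation fallback. The `Source` and `Node` structs, their field names, and the function names are simplified stand-ins invented for this sketch, not the definitions used in the actual file:

use std::cmp::Reverse;

#[derive(Clone)]
struct Source {
    load_per_shard: u32,        // CPU millis per shard; assumed > 0
    num_shards: u32,
    affinity_nodes: Vec<usize>, // ordinals of the nodes with an affinity
}

struct Node {
    available_cpu_millis: u32,
}

/// Returns true iff every shard of every source could be placed.
fn greedy_place(mut sources: Vec<Source>, nodes: &mut [Node]) -> bool {
    // Go through the sources in decreasing order of their total load.
    sources.sort_by_key(|source| Reverse(source.load_per_shard * source.num_shards));
    for source in &mut sources {
        let total_load = source.load_per_shard * source.num_shards;
        // First pass: if an affinity node has room for all of the shards,
        // assign all of the shards to it.
        let affinity_fit = source
            .affinity_nodes
            .iter()
            .copied()
            .find(|&node_ord| nodes[node_ord].available_cpu_millis >= total_load);
        if let Some(node_ord) = affinity_fit {
            nodes[node_ord].available_cpu_millis -= total_load;
            source.num_shards = 0;
            continue;
        }
        // Second pass: put as many shards as possible on the node with the
        // highest available capacity, repeating until done or stuck.
        while source.num_shards > 0 {
            let best_ord = (0..nodes.len())
                .max_by_key(|&node_ord| nodes[node_ord].available_cpu_millis)
                .expect("at least one node");
            let num_placable = nodes[best_ord].available_cpu_millis / source.load_per_shard;
            let num_to_place = num_placable.min(source.num_shards);
            if num_to_place == 0 {
                return false; // not even the emptiest node has room left
            }
            nodes[best_ord].available_cpu_millis -= num_to_place * source.load_per_shard;
            source.num_shards -= num_to_place;
        }
    }
    true
}

/// If greedy placement fails, inflate the node capacities by 20% and
/// start from the beginning, as the end of the comment describes.
fn place_with_inflation(sources: &[Source], node_capacities: &[u32]) -> Vec<Node> {
    let mut scale = 1.0_f64;
    loop {
        let mut nodes: Vec<Node> = node_capacities
            .iter()
            .map(|&cpu| Node { available_cpu_millis: (cpu as f64 * scale) as u32 })
            .collect();
        if greedy_place(sources.to_vec(), &mut nodes) {
            return nodes;
        }
        scale *= 1.2;
    }
}

The real implementation works on `SchedulingProblem` and per-source ordinals; the sketch keeps only the capacity bookkeeping needed to show the two passes and the restart.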
@@ -262,26 +283,6 @@ fn place_unassigned_shards_with_affinity(
     }
 }
 
-// ----------------------------------------------------
-// Phase 3
-// Place unassigned sources.
-//
-// We use a greedy algorithm as a simple heuristic here.
-//
-// We go through the sources in decreasing order of their load,
-// in two passes.
-//
-// In the first pass, we have a look at
-// the nodes with which there is an affinity.
-//
-// If one of them has room for all of the shards, then we assign all
-// of the shards to it.
-//
-// In the second pass, we just put as many shards as possible on the node
-// with the highest available capacity.
-//
-// If this algorithm fails to place all remaining shards, we inflate
-// the node capacities by 20% in the scheduling problem and start from the beginning.
 #[must_use]
 fn place_unassigned_shards_ignoring_affinity(
     mut problem: SchedulingProblem,
@@ -360,10 +361,6 @@ fn place_unassigned_shards_single_source(
         let num_placable_shards = available_capacity.cpu_millis() / source.load_per_shard;
         let num_shards_to_place = num_placable_shards.min(num_shards);
         // Update the solution, the shard load, and the number of shards to place.
-        if num_shards_to_place == 0u32 {
-            // No need to fill indexer_assignments with empty assignments.
-            continue;
-        }
         solution.indexer_assignments[indexer_ord]
             .add_shards(source.source_ord, num_shards_to_place);
         num_shards -= num_shards_to_place;
@@ -783,7 +780,16 @@ mod tests {
     proptest! {
         #[test]
         fn test_proptest_post_conditions((problem, solution) in problem_solution_strategy()) {
-            solve(problem, solution);
+            let solution_1 = solve(problem.clone(), solution);
+            let solution_2 = solve(problem.clone(), solution_1.clone());
+            // TODO: This assert actually fails for some scenarios. We say it is fine
+            // for now as long as the solution does not change again during the
+            // next resolution:
+            // let has_solution_changed_once = solution_1.indexer_assignments != solution_2.indexer_assignments;
+            // assert!(!has_solution_changed_once, "Solution changed for same problem\nSolution 1:{solution_1:?}\nSolution 2: {solution_2:?}");
+            let solution_3 = solve(problem, solution_2.clone());
+            let has_solution_changed_again = solution_2.indexer_assignments != solution_3.indexer_assignments;
+            assert!(!has_solution_changed_again, "solution unstable!!!\n Solution 1: {solution_1:?}\n Solution 2: {solution_2:?}\n Solution 3: {solution_3:?}");
         }
     }
 
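
The new assertions encode a fixed-point property: re-solving an unchanged problem may move shards once, but must then settle. A generic restatement of that check (illustrative only, not part of the commit; the real test compares the `indexer_assignments` field rather than whole solutions):

/// Sketch: after `num_warmup_solves` passes over the identical problem,
/// one more resolution must leave the solution unchanged. The new test
/// above is this check with `num_warmup_solves = 2`.
fn assert_stable_after_warmup<P: Clone, S: Clone + PartialEq + std::fmt::Debug>(
    solve: impl Fn(P, S) -> S,
    problem: P,
    mut solution: S,
    num_warmup_solves: usize,
) {
    // The test tolerates the solution changing during these warm-up passes...
    for _ in 0..num_warmup_solves {
        solution = solve(problem.clone(), solution);
    }
    // ...but re-solving the same problem once more must now be a no-op.
    let resolved = solve(problem, solution.clone());
    assert_eq!(solution, resolved, "solution still changing after warm-up");
}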