@@ -132,7 +132,7 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderSpreadScaler<S> {
             .flat_map(|(spread, count)| {
                 let eligible_hosts = eligible_hosts(&hosts, spread);
                 let eligible_count = eligible_hosts.len();
-                // Partition hosts into ones running this provider (no matter what is running it, and others
+                // Partition hosts into ones running this provider (no matter what is running it), and others
                 let (running, other): (HashMap<&String, &Host>, HashMap<&String, &Host>) =
                     eligible_hosts.into_iter().partition(|(_host_id, host)| {
                         host.providers
@@ -145,8 +145,10 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderSpreadScaler<S> {
                     });
                 // Get the count of all running providers
                 let current_running = running.len();
-                // Now get only the hosts running a provider we own
-                let running_for_spread = running.into_iter().filter(|(_host_id, host)| {
+                // NOTE(#120): Now partition providers into ones running for this spread, and ones running for
+                // either other spreads or no spread at all. Hosts cannot run multiple providers with the same
+                // link name and contract id, so wadm currently will allow these to count up to the total.
+                let (running_for_spread, running_for_other): (HashMap<&String, &Host>, HashMap<&String, &Host>) = running.into_iter().partition(|(_host_id, host)| {
                     host.providers
                         .get(&ProviderInfo {
                             contract_id: contract_id.to_string(),
@@ -166,12 +168,8 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderSpreadScaler<S> {
                             }
                             has_annotation
                         })
-                        })
-                        .unwrap_or_else(|| {
-                            trace!(%provider_id, "Couldn't find matching provider in host provider list");
-                            false
-                        })
-                }).collect::<HashMap<_, _>>();
+                        }).unwrap_or(false)
+                });
                 trace!(current_for_spread = %running_for_spread.len(), %current_running, expected = %count, eligible_hosts = %eligible_count, %provider_id, "Calculated running providers, reconciling with expected count");
                 match current_running.cmp(count) {
                     // We can only stop providers that we own, so if we have more than we need, we
@@ -195,11 +193,10 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderSpreadScaler<S> {
                             .collect::<Vec<Command>>()
                     }
                     Ordering::Less => {
-                        let num_to_start = count - current_running;
-
-                        // NOTE(brooksmtownsend): It's possible that this does not fully satisfy
-                        // the requirements if we are unable to form enough start commands. Update
-                        // status accordingly once we have a way to.
+                        // NOTE(#120): Providers running for other spreads or for no spreads can count for up to the total
+                        // number of providers to satisfy a spread. We do not count them above because we don't want to
+                        // end up in an infinite loop of stopping other managed providers.
+                        let num_to_start = count.saturating_sub(current_running).saturating_sub(running_for_other.len());

                         // Take `num_to_start` commands from this iterator
                         let commands = other
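
For reference, below is a minimal, self-contained sketch of the counting logic this change introduces. The HostInfo struct, the sample data, and the host names are simplified stand-ins for illustration only (not wadm's real Host/ProviderInfo types); the partition and the saturating_sub arithmetic deliberately mirror the hunks above.

    use std::collections::HashMap;

    // Hypothetical, simplified stand-in for wadm's Host: we only track whether the
    // provider is running on the host and which spread (if any) its annotations name.
    struct HostInfo {
        provider_running: bool,
        spread: Option<String>,
    }

    fn main() {
        let hosts: HashMap<String, HostInfo> = HashMap::from([
            ("host-a".into(), HostInfo { provider_running: true, spread: Some("us-east".into()) }),
            ("host-b".into(), HostInfo { provider_running: true, spread: Some("us-west".into()) }),
            ("host-c".into(), HostInfo { provider_running: false, spread: None }),
        ]);
        let spread_name = "us-east";
        let count: usize = 3;

        // All hosts where the provider is running, no matter which spread started it.
        let running: HashMap<&String, &HostInfo> = hosts
            .iter()
            .filter(|(_id, host)| host.provider_running)
            .collect();
        let current_running = running.len();

        // Same shape as the diff: split the running providers into ones owned by this
        // spread and ones running for other spreads (or for no spread at all).
        let (running_for_spread, running_for_other): (HashMap<_, _>, HashMap<_, _>) = running
            .into_iter()
            .partition(|(_id, host)| host.spread.as_deref() == Some(spread_name));

        // Mirrors the arithmetic in the hunk above; saturating_sub keeps the count
        // from underflowing when existing providers already cover the requested total.
        let num_to_start = count
            .saturating_sub(current_running)
            .saturating_sub(running_for_other.len());

        println!(
            "for spread: {}, for other: {}, to start: {}",
            running_for_spread.len(),
            running_for_other.len(),
            num_to_start
        );
    }

With this sample data the sketch prints "for spread: 1, for other: 1, to start: 0", showing how providers started for other spreads (or no spread) count toward the requested total instead of triggering additional start commands.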