@@ -255,8 +255,7 @@ PriorityQueue::PolicyQueue::ReadyForErasure()
 }
 
 PriorityQueue::PriorityQueue()
-    : size_(0), front_priority_level_(0), last_priority_level_(0),
-      default_policy_()
+    : size_(0), front_priority_level_(0), default_policy_()
 {
   queues_.emplace(0, PolicyQueue(default_policy_, true));
   front_priority_level_ = queues_.begin()->first;
@@ -266,8 +265,7 @@ PriorityQueue::PriorityQueue()
 PriorityQueue::PriorityQueue(
     const inference::ModelQueuePolicy& default_queue_policy,
     uint32_t priority_levels, const ModelQueuePolicyMap queue_policy_map)
-    : size_(0), last_priority_level_(priority_levels),
-      default_policy_(default_queue_policy)
+    : size_(0), default_policy_(default_queue_policy)
 {
   // Permanently instantiate PolicyQueue with keep_instantiate=true
   // to prevent them from being erased & created during scheduling
@@ -334,13 +332,13 @@ PriorityQueue::Dequeue(std::unique_ptr<InferenceRequest>* request)
 void
 PriorityQueue::ReleaseRejectedRequests(
     std::shared_ptr<std::vector<std::deque<std::unique_ptr<InferenceRequest>>>>*
-        requests)
+    requests)
 {
   auto res = std::make_shared<
       std::vector<std::deque<std::unique_ptr<InferenceRequest>>>>(
       queues_.size());
   size_t idx = 0;
-  for (auto it = queues_.begin(); it != queues_.end(); ) {
+  for (auto it = queues_.begin(); it != queues_.end();) {
     it->second.ReleaseRejectedQueue(&((*res)[idx]));
     idx++;
     if (it->second.ReadyForErasure()) {
@@ -350,7 +348,7 @@ PriorityQueue::ReleaseRejectedRequests(
           it->first == pending_cursor_.curr_it_->first) {
         pending_cursor_.valid_ = false;
       }
-      it = queues_.erase(it); // returns iterator following removed element
+      it = queues_.erase(it);  // returns iterator following removed element
     } else {
       ++it;
     }
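The cleanup loop above relies on the standard erase-while-iterating pattern for ordered containers: since C++11, `std::map::erase(iterator)` returns the iterator following the removed element, so the loop resumes from that return value instead of incrementing an iterator that `erase()` has just invalidated. A small standalone illustration of the same pattern on a plain `std::map` (the names below are generic, not Triton's):

#include <cassert>
#include <map>
#include <string>

int main() {
  std::map<int, std::string> queues{{0, "default"}, {3, ""}, {7, ""}};

  // Erase-while-iterating: erase(it) invalidates only the erased iterator
  // and returns the iterator following the removed element, so the loop
  // continues from that return value.
  for (auto it = queues.begin(); it != queues.end();) {
    if (it->second.empty()) {
      it = queues.erase(it);  // safe: continue from the next element
    } else {
      ++it;  // keep this entry and advance normally
    }
  }

  assert(queues.size() == 1 && queues.count(0) == 1);
  return 0;
}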