
Commit 4ddaa8a

[core] Fix all raylet variable shadowing (#51689)
Signed-off-by: dentiny <dentinyhao@gmail.com>
1 parent 9440ac4

File tree: 6 files changed, 48 additions and 43 deletions

src/ray/object_manager/plasma/store.cc

Lines changed: 4 additions & 1 deletion
@@ -380,7 +380,6 @@ Status PlasmaStore::ProcessClientMessage(std::shared_ptr<Client> client,
// TODO(suquark): We should convert these interfaces to const later.
const uint8_t *input = const_cast<uint8_t *>(message.data());
size_t input_size = message.size();
- ObjectID object_id;

// Process the different types of requests.
switch (type) {
@@ -425,6 +424,7 @@ Status PlasmaStore::ProcessClientMessage(std::shared_ptr<Client> client,
ReplyToCreateClient(client, object_id, request->request_id());
} break;
case fb::MessageType::PlasmaAbortRequest: {
+ ObjectID object_id;
RAY_RETURN_NOT_OK(ReadAbortRequest(input, input_size, &object_id));
RAY_CHECK(AbortObject(object_id, client) == 1) << "To abort an object, the only "
"client currently using it "
@@ -443,6 +443,7 @@ Status PlasmaStore::ProcessClientMessage(std::shared_ptr<Client> client,
// May unmap: client knows a fallback-allocated fd is involved.
// Should unmap: server finds refcnt == 0 -> need to be unmapped.
bool may_unmap;
+ ObjectID object_id;
RAY_RETURN_NOT_OK(ReadReleaseRequest(input, input_size, &object_id, &may_unmap));
bool should_unmap = ReleaseObject(object_id, client);
if (!may_unmap) {
@@ -468,6 +469,7 @@ Status PlasmaStore::ProcessClientMessage(std::shared_ptr<Client> client,
RAY_RETURN_NOT_OK(SendDeleteReply(client, object_ids, error_codes));
} break;
case fb::MessageType::PlasmaContainsRequest: {
+ ObjectID object_id;
RAY_RETURN_NOT_OK(ReadContainsRequest(input, input_size, &object_id));
if (object_lifecycle_mgr_.IsObjectSealed(object_id)) {
RAY_RETURN_NOT_OK(SendContainsReply(client, object_id, 1));
@@ -476,6 +478,7 @@ Status PlasmaStore::ProcessClientMessage(std::shared_ptr<Client> client,
}
} break;
case fb::MessageType::PlasmaSealRequest: {
+ ObjectID object_id;
RAY_RETURN_NOT_OK(ReadSealRequest(input, input_size, &object_id));
SealObjects({object_id});
RAY_RETURN_NOT_OK(SendSealReply(client, object_id, PlasmaError::OK));
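The store.cc hunks narrow the scope of object_id: the single declaration above the switch is removed, and each case that reads an ObjectID declares its own, so nothing inside a case shadows an outer variable. Below is a minimal sketch of the same pattern with hypothetical message and ID types, not the Plasma API:

#include <cstdint>
#include <iostream>

enum class MessageType { kCreate, kAbort };

// Before the fix there was one function-scope `id` plus per-case re-declarations
// that shadowed it. After (shown here), each case declares exactly the variable
// it needs, scoped to that case. Names are illustrative only.
void ProcessMessage(MessageType type, uint64_t payload) {
  switch (type) {
    case MessageType::kCreate: {
      uint64_t id = payload;  // scoped to this case
      std::cout << "create " << id << "\n";
      break;
    }
    case MessageType::kAbort: {
      uint64_t id = payload + 1;  // independent of the kCreate one
      std::cout << "abort " << id << "\n";
      break;
    }
  }
}

int main() {
  ProcessMessage(MessageType::kCreate, 7);
  ProcessMessage(MessageType::kAbort, 7);
  return 0;
}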

src/ray/raylet/dependency_manager.cc

Lines changed: 19 additions & 19 deletions
@@ -97,19 +97,19 @@ void DependencyManager::StartOrUpdateWaitRequest(

void DependencyManager::CancelWaitRequest(const WorkerID &worker_id) {
RAY_LOG(DEBUG) << "Canceling wait request for worker " << worker_id;
- auto it = wait_requests_.find(worker_id);
- if (it == wait_requests_.end()) {
+ auto req_iter = wait_requests_.find(worker_id);
+ if (req_iter == wait_requests_.end()) {
return;
}

- for (const auto &obj_id : it->second) {
- auto it = required_objects_.find(obj_id);
- RAY_CHECK(it != required_objects_.end());
- it->second.dependent_wait_requests.erase(worker_id);
- RemoveObjectIfNotNeeded(it);
+ for (const auto &obj_id : req_iter->second) {
+ auto obj_iter = required_objects_.find(obj_id);
+ RAY_CHECK(obj_iter != required_objects_.end());
+ obj_iter->second.dependent_wait_requests.erase(worker_id);
+ RemoveObjectIfNotNeeded(obj_iter);
}

- wait_requests_.erase(it);
+ wait_requests_.erase(req_iter);
}

void DependencyManager::StartOrUpdateGetRequest(
@@ -152,23 +152,23 @@ void DependencyManager::StartOrUpdateGetRequest(

void DependencyManager::CancelGetRequest(const WorkerID &worker_id) {
RAY_LOG(DEBUG) << "Canceling get request for worker " << worker_id;
- auto it = get_requests_.find(worker_id);
- if (it == get_requests_.end()) {
+ auto req_iter = get_requests_.find(worker_id);
+ if (req_iter == get_requests_.end()) {
return;
}

RAY_LOG(DEBUG) << "Canceling pull for get request from worker " << worker_id
- << " request: " << it->second.second;
- object_manager_.CancelPull(it->second.second);
-
- for (const auto &obj_id : it->second.first) {
- auto it = required_objects_.find(obj_id);
- RAY_CHECK(it != required_objects_.end());
- it->second.dependent_get_requests.erase(worker_id);
- RemoveObjectIfNotNeeded(it);
+ << " request: " << req_iter->second.second;
+ object_manager_.CancelPull(req_iter->second.second);
+
+ for (const auto &obj_id : req_iter->second.first) {
+ auto obj_iter = required_objects_.find(obj_id);
+ RAY_CHECK(obj_iter != required_objects_.end());
+ obj_iter->second.dependent_get_requests.erase(worker_id);
+ RemoveObjectIfNotNeeded(obj_iter);
}

- get_requests_.erase(it);
+ get_requests_.erase(req_iter);
}

/// Request dependencies for a queued task.
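Both CancelWaitRequest and CancelGetRequest had a nested lookup named `it` inside a loop whose enclosing iterator was also named `it`, so the inner declaration shadowed the outer one; the fix gives each lookup a descriptive name (req_iter, obj_iter). A small self-contained sketch of the renamed-iterator pattern, using placeholder containers instead of the Ray types:

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  std::map<int, std::vector<std::string>> requests = {{1, {"a", "b"}}};
  std::map<std::string, int> ref_counts = {{"a", 2}, {"b", 1}};

  // Distinct names for the outer and inner lookups; before the fix both were
  // called `it`, and the inner one shadowed the outer.
  auto req_iter = requests.find(1);
  if (req_iter != requests.end()) {
    for (const auto &key : req_iter->second) {
      auto obj_iter = ref_counts.find(key);
      if (obj_iter != ref_counts.end()) {
        --obj_iter->second;  // unambiguously the inner iterator
      }
    }
    requests.erase(req_iter);  // unambiguously the outer iterator
  }
  std::cout << ref_counts["a"] << " " << ref_counts["b"] << "\n";  // prints: 1 0
  return 0;
}

In the sketch, only the inner map is mutated inside the loop, so the outer iterator is still valid for the final erase; a rename rather than a restructure is enough.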

src/ray/raylet/local_task_manager.cc

Lines changed: 10 additions & 10 deletions
@@ -172,14 +172,14 @@ void LocalTaskManager::DispatchScheduledTasksToWorkers() {
// requests.
size_t num_classes_with_cpu = 0;
for (const auto &entry : tasks_to_dispatch_) {
- const auto &dispatch_queue = entry.second;
- for (const auto &work : dispatch_queue) {
+ const auto &cur_dispatch_queue = entry.second;
+ for (const auto &work : cur_dispatch_queue) {
const auto &task_spec = work->task.GetTaskSpecification();
auto cpu_request_ =
task_spec.GetRequiredResources().Get(scheduling::ResourceID::CPU()).Double();
if (cpu_request_ > 0) {
num_classes_with_cpu++;
- total_cpu_requests_ += dispatch_queue.size() * cpu_request_;
+ total_cpu_requests_ += cur_dispatch_queue.size() * cpu_request_;
break;
}
}
@@ -201,9 +201,10 @@ void LocalTaskManager::DispatchScheduledTasksToWorkers() {
size_t total_cpu_running_tasks = 0;
for (auto &entry : info_by_sched_cls_) {
// Only consider CPU requests
- const auto &sched_cls_desc =
+ const auto &cur_sched_cls_desc =
TaskSpecification::GetSchedulingClassDescriptor(entry.first);
- if (sched_cls_desc.resource_set.Get(scheduling::ResourceID::CPU()).Double() > 0) {
+ if (cur_sched_cls_desc.resource_set.Get(scheduling::ResourceID::CPU()).Double() >
+ 0) {
total_cpu_running_tasks += entry.second.running_tasks.size();
}
}
@@ -812,13 +813,12 @@ void LocalTaskManager::PinTaskArgs(const TaskSpecification &spec,
// TODO(swang): This should really be an assertion, but we can sometimes
// receive a duplicate task request if there is a failure and the original
// version of the task has not yet been canceled.
- auto inserted = executing_task_args_.emplace(spec.TaskId(), deps).second;
- if (inserted) {
+ auto executed_task_inserted = executing_task_args_.emplace(spec.TaskId(), deps).second;
+ if (executed_task_inserted) {
for (size_t i = 0; i < deps.size(); i++) {
- auto inserted =
+ auto [it, pinned_task_inserted] =
pinned_task_arguments_.emplace(deps[i], std::make_pair(std::move(args[i]), 0));
- auto it = inserted.first;
- if (inserted.second) {
+ if (pinned_task_inserted) {
// This is the first task that needed this argument.
pinned_task_arguments_bytes_ += it->second.first->GetSize();
}
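In PinTaskArgs, the inner `auto inserted = pinned_task_arguments_.emplace(...)` shadowed the outer `inserted` flag and then needed `.first`/`.second` accesses; the fix unpacks the emplace result with a structured binding so the iterator and the insertion flag each get an unambiguous name. A short sketch of that idiom with placeholder types, not the Ray containers:

#include <iostream>
#include <map>
#include <string>
#include <utility>

int main() {
  std::map<std::string, std::pair<int, int>> pinned;  // value: {size, ref count}

  // std::map::emplace returns {iterator, bool}; a structured binding names both
  // parts in one line, with no second variable shadowing an outer `inserted`.
  auto [it, inserted] = pinned.emplace("arg0", std::make_pair(64, 0));
  if (inserted) {
    std::cout << "first use, size " << it->second.first << "\n";
  }
  it->second.second += 1;  // bump the reference count on both paths
  return 0;
}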

src/ray/raylet/scheduling/cluster_task_manager.cc

Lines changed: 2 additions & 2 deletions
@@ -253,8 +253,8 @@ void ClusterTaskManager::ScheduleAndDispatchTasks() {
if (is_infeasible) {
RAY_CHECK(!work_queue.empty());
// Only announce the first item as infeasible.
- auto &work_queue = shapes_it->second;
- const auto &work = work_queue[0];
+ auto &cur_work_queue = shapes_it->second;
+ const auto &work = cur_work_queue[0];
const RayTask task = work->task;
if (announce_infeasible_task_) {
announce_infeasible_task_(task);

src/ray/raylet/scheduling/policy/bundle_scheduling_policy.cc

Lines changed: 8 additions & 7 deletions
@@ -306,11 +306,12 @@ SchedulingResult BundlePackSchedulingPolicy::Schedule(
}

// Releasing the resources temporarily deducted from `cluster_resource_manager_`.
- for (size_t index = 0; index < result_nodes.size(); index++) {
+ for (size_t res_node_idx = 0; res_node_idx < result_nodes.size(); res_node_idx++) {
// If `PackSchedule` fails, the id of some nodes may be nil.
- if (!result_nodes[index].IsNil()) {
+ if (!result_nodes[res_node_idx].IsNil()) {
RAY_CHECK(cluster_resource_manager_.AddNodeAvailableResources(
- result_nodes[index], (*sorted_resource_request_list[index]).GetResourceSet()));
+ result_nodes[res_node_idx],
+ (*sorted_resource_request_list[res_node_idx]).GetResourceSet()));
}
}

@@ -361,10 +362,10 @@ SchedulingResult BundleSpreadSchedulingPolicy::Schedule(
selected_nodes.emplace(best_node);
} else {
// Scheduling from selected nodes.
- auto best_node = GetBestNode(*resource_request,
- selected_nodes,
- options,
- available_cpus_before_bundle_scheduling);
+ best_node = GetBestNode(*resource_request,
+ selected_nodes,
+ options,
+ available_cpus_before_bundle_scheduling);
if (!best_node.first.IsNil()) {
result_nodes.emplace_back(best_node.first);
RAY_CHECK(cluster_resource_manager_.SubtractNodeAvailableResources(
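The second hunk here is more than a rename: `auto best_node = GetBestNode(...)` in the else branch declared a new local that shadowed the `best_node` from the enclosing scope, whereas dropping `auto` makes the line an assignment so there is only one variable. A tiny sketch, with hypothetical values rather than the scheduling types, of why the shadowed form is easy to misread:

#include <iostream>
#include <string>

int main() {
  std::string best_node = "node-A";
  bool use_fallback = true;

  if (use_fallback) {
    // Shadowing pattern: writing `auto best_node = "node-B";` here would create
    // a new local, and the outer `best_node` would still be "node-A" afterwards.
    best_node = "node-B";  // plain assignment updates the outer variable
  }
  std::cout << best_node << "\n";  // prints node-B
  return 0;
}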

src/ray/raylet/worker_pool.cc

Lines changed: 5 additions & 4 deletions
@@ -1583,10 +1583,11 @@ void WorkerPool::DisconnectWorker(const std::shared_ptr<WorkerInterface> &worker
return;
}

- for (auto it = idle_of_all_languages_.begin(); it != idle_of_all_languages_.end();
- it++) {
- if (it->worker == worker) {
- idle_of_all_languages_.erase(it);
+ for (auto idle_worker_iter = idle_of_all_languages_.begin();
+ idle_worker_iter != idle_of_all_languages_.end();
+ idle_worker_iter++) {
+ if (idle_worker_iter->worker == worker) {
+ idle_of_all_languages_.erase(idle_worker_iter);
break;
}
}
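Taken together, the six files apply one rule: a nested declaration should not reuse the name of anything in an enclosing scope. Compilers can flag this class of mistake at build time; the example below is a hedged illustration of opting into -Wshadow with GCC or Clang, and is not part of this commit (the Ray build flags are not shown here):

// shadow_demo.cc -- compile with:  g++ -Wshadow -Werror shadow_demo.cc
// (clang++ accepts the same flags). The nested `it` below is exactly the
// pattern this commit renames away; with -Wshadow -Werror it fails to build.
#include <map>
#include <vector>

int main() {
  std::map<int, std::vector<int>> outer = {{1, {1, 2}}};
  std::map<int, int> inner = {{1, 10}, {2, 20}};

  auto it = outer.find(1);
  for (int key : it->second) {
    auto it = inner.find(key);  // -Wshadow: declaration shadows a local variable
    (void)it;
  }
  return 0;
}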
