Skip to content

Commit e7ae18e

Browse files
committed
type annotation updates
1 parent 1c67354 commit e7ae18e

File tree

4 files changed: +23 additions, -23 deletions

assemblyline_core/scaler/controllers/docker_ctl.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -243,7 +243,7 @@ def memory_info(self):
243243
self.log.debug(f'Total Memory available {mem}/{self._info["MemTotal"]/mega}')
244244
return mem, total_mem
245245

246-
def get_target(self, service_name):
246+
def get_target(self, service_name: str) -> int:
247247
"""Get how many instances of a service we expect to be running.
248248
249249
Since we start our containers with 'restart always' we just need to count how many

assemblyline_core/scaler/controllers/interface.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,11 +24,11 @@ def cpu_info(self):
2424
"""Return free and total memory in the system."""
2525
raise NotImplementedError()
2626

27-
def free_cpu(self):
27+
def free_cpu(self) -> float:
2828
"""Number of cores available for reservation."""
2929
return self.cpu_info()[0]
3030

31-
def free_memory(self):
31+
def free_memory(self) -> float:
3232
"""Megabytes of RAM that has not been reserved."""
3333
return self.memory_info()[0]
3434

assemblyline_core/scaler/controllers/kubernetes_ctl.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
import os
88
import threading
99
import weakref
10-
from typing import Dict, List, Optional, Tuple
10+
from typing import Optional, Tuple
1111

1212
import urllib3
1313
import kubernetes
@@ -48,7 +48,7 @@ def get_return_type(self, func):
4848
return None
4949

5050

51-
def median(values: List[float]) -> float:
51+
def median(values: list[float]) -> float:
5252
if len(values) == 0:
5353
return 0
5454
return values[len(values)//2]
@@ -149,15 +149,15 @@ def __init__(self, logger, namespace, prefix, priority, cpu_reservation, labels=
149149
self.cpu_reservation: float = max(0.0, min(cpu_reservation, 1.0))
150150
self.logger = logger
151151
self.log_level: str = log_level
152-
self._labels: Dict[str, str] = labels or {}
152+
self._labels: dict[str, str] = labels or {}
153153
self.apps_api = client.AppsV1Api()
154154
self.api = client.CoreV1Api()
155155
self.net_api = client.NetworkingV1Api()
156156
self.namespace: str = namespace
157-
self.config_volumes: Dict[str, V1Volume] = {}
158-
self.config_mounts: Dict[str, V1VolumeMount] = {}
159-
self.core_config_volumes: Dict[str, V1Volume] = {}
160-
self.core_config_mounts: Dict[str, V1VolumeMount] = {}
157+
self.config_volumes: dict[str, V1Volume] = {}
158+
self.config_mounts: dict[str, V1VolumeMount] = {}
159+
self.core_config_volumes: dict[str, V1Volume] = {}
160+
self.core_config_mounts: dict[str, V1VolumeMount] = {}
161161
self._external_profiles = weakref.WeakValueDictionary()
162162
self._service_limited_env: dict[str, dict[str, str]] = defaultdict(dict)
163163

@@ -191,7 +191,7 @@ def __init__(self, logger, namespace, prefix, priority, cpu_reservation, labels=
191191
pod_background = threading.Thread(target=self._loop_forever(self._monitor_pods), daemon=True)
192192
pod_background.start()
193193

194-
self._deployment_targets: Dict[str, int] = {}
194+
self._deployment_targets: dict[str, int] = {}
195195
deployment_background = threading.Thread(target=self._loop_forever(self._monitor_deployments), daemon=True)
196196
deployment_background.start()
197197

@@ -434,7 +434,7 @@ def memory_info(self):
434434
return self._node_pool_max_ram - self._pod_used_ram, self._node_pool_max_ram
435435

436436
@staticmethod
437-
def _create_metadata(deployment_name: str, labels: Dict[str, str]):
437+
def _create_metadata(deployment_name: str, labels: dict[str, str]):
438438
return V1ObjectMeta(name=deployment_name, labels=labels)
439439

440440
def _create_volumes(self, core_mounts=False):
@@ -585,7 +585,7 @@ def get_target(self, service_name: str) -> int:
585585
"""Get the target for running instances of a service."""
586586
return self._deployment_targets.get(service_name, 0)
587587

588-
def get_targets(self) -> Dict[str, int]:
588+
def get_targets(self) -> dict[str, int]:
589589
"""Get the target for running instances of all services."""
590590
return self._deployment_targets
591591

assemblyline_core/scaler/scaler_server.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
import threading
77
from collections import defaultdict
88
from string import Template
9-
from typing import Dict, List, Optional, Any
9+
from typing import Optional, Any
1010
import os
1111
import math
1212
import time
@@ -164,12 +164,12 @@ def instance_limit(self):
164164
return self._max_instances
165165

166166
@property
167-
def max_instances(self):
167+
def max_instances(self) -> int:
168168
# Adjust the max_instances based on the number that is already requested
169169
# this keeps the scaler from running way ahead with its demands when resource caps are reached
170170
return min(self._max_instances, self.target_instances + 2)
171171

172-
def update(self, delta, instances, backlog, duty_cycle):
172+
def update(self, delta: float, instances: int, backlog: int, duty_cycle: float):
173173
self.last_update = time.time()
174174
self.running_instances = instances
175175
self.queue_length = backlog
@@ -235,7 +235,7 @@ def __init__(self, config=None, datastore=None, redis=None, redis_persist=None):
235235

236236
self.scaler_timeout_queue = NamedQueue(SCALER_TIMEOUT_QUEUE, host=self.redis_persist)
237237
self.error_count_lock = threading.Lock()
238-
self.error_count: Dict[str, List[float]] = {}
238+
self.error_count: dict[str, list[float]] = {}
239239
self.status_table = ExpiringHash(SERVICE_STATE_HASH, host=self.redis, ttl=30*60)
240240
self.service_change_watcher = EventWatcher(self.redis, deserializer=ServiceChange.deserialize)
241241
self.service_change_watcher.register('changes.services.*', self._handle_service_change_event)
@@ -274,7 +274,7 @@ def __init__(self, config=None, datastore=None, redis=None, redis_persist=None):
274274
self.controller.global_mounts.append((CLASSIFICATION_HOST_PATH, '/etc/assemblyline/classification.yml'))
275275

276276
# Information about services
277-
self.profiles: Dict[str, ServiceProfile] = {}
277+
self.profiles: dict[str, ServiceProfile] = {}
278278
self.profiles_lock = threading.RLock()
279279

280280
# Prepare a single threaded scheduler
@@ -364,7 +364,7 @@ def _sync_service(self, service: Service):
364364
name = service.name
365365
stage = self.get_service_stage(service.name)
366366
default_settings = self.config.core.scaler.service_defaults
367-
image_variables = defaultdict(str)
367+
image_variables: defaultdict[str, str] = defaultdict(str)
368368
image_variables.update(self.config.services.image_variables)
369369

370370
def prepare_container(docker_config: DockerConfig) -> DockerConfig:
@@ -473,7 +473,7 @@ def update_scaling(self):
473473
# Figure out what services are expected to be running and how many
474474
with elasticapm.capture_span('read_profiles'):
475475
with self.profiles_lock:
476-
all_profiles: Dict[str, ServiceProfile] = copy.deepcopy(self.profiles)
476+
all_profiles: dict[str, ServiceProfile] = copy.deepcopy(self.profiles)
477477
raw_targets = self.controller.get_targets()
478478
targets = {_p.name: raw_targets.get(_p.name, 0) for _p in all_profiles.values()}
479479

@@ -516,7 +516,7 @@ def update_scaling(self):
516516
free_memory = self.controller.free_memory()
517517

518518
#
519-
def trim(prof: List[ServiceProfile]):
519+
def trim(prof: list[ServiceProfile]):
520520
prof = [_p for _p in prof if _p.desired_instances > targets[_p.name]]
521521
drop = [_p for _p in prof if _p.cpu > free_cpu or _p.ram > free_memory]
522522
if drop:
@@ -525,7 +525,7 @@ def trim(prof: List[ServiceProfile]):
525525
prof = [_p for _p in prof if _p.cpu <= free_cpu and _p.ram <= free_memory]
526526
return prof
527527

528-
remaining_profiles: List[ServiceProfile] = trim(list(all_profiles.values()))
528+
remaining_profiles: list[ServiceProfile] = trim(list(all_profiles.values()))
529529
# The target values up until now should be in sync with the container orchestrator
530530
# create a copy, so we can track which ones change in the following loop
531531
old_targets = dict(targets)
@@ -553,7 +553,7 @@ def trim(prof: List[ServiceProfile]):
553553
pool.call(self.controller.set_target, name, value)
554554

555555
@elasticapm.capture_span(span_type=APM_SPAN_TYPE)
556-
def handle_service_error(self, service_name):
556+
def handle_service_error(self, service_name: str):
557557
"""Handle an error occurring in the *analysis* service.
558558
559559
Errors for core systems should simply be logged, and a best effort to continue made.

Comments (0)