
Commit 141ba6c

Getting cleaner
1 parent 8698002 commit 141ba6c

2 files changed: +54 −69 lines

evaluation_script/__init__.py

Lines changed: 13 additions & 24 deletions
@@ -4,6 +4,16 @@
 import urllib.request
 import json
 
+def version_to_tuple(version):
+    # Split version by '.' and keep only numeric parts
+    numeric_parts = []
+    for part in version.split("."):
+        # Extract leading numeric portion of each part
+        numeric_part = "".join(c for c in part if c.isdigit())
+        if numeric_part:
+            numeric_parts.append(int(numeric_part))
+    return tuple(numeric_parts)
+
 def is_package_version_on_pypi(package_name, version=None):
     """
     Checks if a package (and optionally a specific version) exists on PyPI.
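For reference, a minimal standalone sketch of what the new version_to_tuple helper produces. The function body is restated from the hunk above (comments omitted) so the snippet runs on its own; the sample version strings are illustrative, not taken from the commit.

# Restated from the hunk above so this example is self-contained.
def version_to_tuple(version):
    numeric_parts = []
    for part in version.split("."):
        numeric_part = "".join(c for c in part if c.isdigit())
        if numeric_part:
            numeric_parts.append(int(numeric_part))
    return tuple(numeric_parts)

# Plain release strings become integer tuples.
assert version_to_tuple("1.31.1") == (1, 31, 1)
# Tuple comparison is what the version gate further down relies on.
assert version_to_tuple("1.31.1") >= (1, 30, 1)
# Non-digit characters are dropped; only the digits of each dot-separated part survive.
assert version_to_tuple("1.31.1.dev0") == (1, 31, 1, 0)

Note that every digit in a part is kept, not just the leading run, so a part like "1rc2" maps to 12; for the plain release strings used here that distinction does not matter.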
@@ -53,9 +63,8 @@ def is_package_version_on_pypi(package_name, version=None):
     return False
 
 def install(package):
-    # Install a pip python package
-
     try:
+        # Install a pip python package
         subprocess.run([sys.executable,"-m","pip","install","--disable-pip-version-check",package])
     except subprocess.CalledProcessError as e:
         print(f"Error occurred while installing {package}: {e.stderr}")
@@ -68,11 +77,9 @@ def install(package):
         sys.stderr.flush()
 
 # Install standard dependencies
-# install("argcomplete")
-# install("colorama")
+install("colorama")
 install("pillow")
 # install("pykitti") # Might install additional light deps
-# install("rosbags")
 # is_package_version_on_pypi("natsort")
 install("natsort")
 # install("lz4")
@@ -81,22 +88,10 @@ def install(package):
 # Install evo from local wheel inside evaluation_script/deps/
 this_dir = os.path.dirname(__file__)
 evo_wheel_path = os.path.join(this_dir, "deps", "evo-1.31.1-py3-none-any.whl")
-
-# # try:
 subprocess.check_call([sys.executable, "-m", "pip", "install", "--no-deps","--disable-pip-version-check", "--ignore-requires-python", evo_wheel_path])
 
+# Verify evo version
 from evo import __version__
-
-def version_to_tuple(version):
-    # Split version by '.' and keep only numeric parts
-    numeric_parts = []
-    for part in version.split("."):
-        # Extract leading numeric portion of each part
-        numeric_part = "".join(c for c in part if c.isdigit())
-        if numeric_part:
-            numeric_parts.append(int(numeric_part))
-    return tuple(numeric_parts)
-
 evo_version_tuple = version_to_tuple(__version__)
 required_version_tuple = (1, 30, 1)
 
@@ -109,12 +104,6 @@ def version_to_tuple(version):
     print(f"✅ Evo version {__version__} meets the required version.")
     sys.stdout.flush()
 
-
-from evo.core import sync
-from evo.core.trajectory import PoseTrajectory3D
-from evo.core.trajectory import Plane
-from evo.core.metrics import PoseRelation, Unit
-
 print("✅ evo is installed and available.")
 sys.stdout.flush()
 

evaluation_script/main.py

Lines changed: 41 additions & 45 deletions
@@ -184,51 +184,47 @@ def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwarg
     # }
     # )
 
-    # The following line might need adjustment depending on EvalAI requirements.
-    # If EvalAI expects a specific structure like the original one,
-    # you might need to aggregate or select specific results.
-    # For now, let's keep the first split's results for submission_result as an example.
-    # if output["result"]:
-    #     total_ate = 0
-    #     total_rte = 0
-    #     total_le = 0
-    #     count = 0
-
-    #     for split_result in output["result"]:
-    #         # Each split_result is a dict like {"split_name": {"ATE": x, "RTE": y, "LE": z}}
-    #         # Get the inner metrics dictionary (assuming only one key per outer dict)
-    #         split_name = list(split_result.keys())[0]
-    #         metrics = split_result[split_name]
-
-    #         # Accumulate metrics if they exist and are not None
-    #         if metrics.get("ATE") is not None:
-    #             total_ate += metrics["ATE"]
-    #         if metrics.get("RTE") is not None:
-    #             total_rte += metrics["RTE"]
-    #         if metrics.get("LE") is not None:
-    #             total_le += metrics["LE"]
-    #         count += 1 # Increment count for each split processed
-
-    #     # Calculate averages, handle division by zero if count is 0
-    #     if count > 0:
-    #         avg_ate = total_ate / count
-    #         avg_rte = total_rte / count
-    #         avg_le = total_le / count
-    #         output["submission_result"] = {
-    #             "ATE": avg_ate,
-    #             "RTE": avg_rte,
-    #             "LE": avg_le
-    #         }
-    #     else:
-    #         # Handle case with no valid metrics found
-    #         output["submission_result"] = {
-    #             "ATE": None,
-    #             "RTE": None,
-    #             "LE": None
-    #         }
-    # else:
-    #     output["submission_result"] = {} # Handle case with no evaluated metrics
-    output["submission_result"] = output["result"][0]["heap"]
+    if output["result"]:
+        total_ate = 0
+        total_rte = 0
+        total_le = 0
+        count = 0
+
+        for split_result in output["result"]:
+            # Each split_result is a dict like {"split_name": {"ATE": x, "RTE": y, "LE": z}}
+            # Get the inner metrics dictionary (assuming only one key per outer dict)
+            split_name = list(split_result.keys())[0]
+            metrics = split_result[split_name]
+
+            # Accumulate metrics if they exist and are not None
+            if metrics.get("ATE") is not None:
+                total_ate += metrics["ATE"]
+            if metrics.get("RTE") is not None:
+                total_rte += metrics["RTE"]
+            if metrics.get("LE") is not None:
+                total_le += metrics["LE"]
+            count += 1 # Increment count for each split processed
+
+        # Calculate averages, handle division by zero if count is 0
+        if count > 0:
+            avg_ate = total_ate / count
+            avg_rte = total_rte / count
+            avg_le = total_le / count
+            output["submission_result"] = {
+                "ATE": avg_ate,
+                "RTE": avg_rte,
+                "LE": avg_le
+            }
+        else:
+            # Handle case with no valid metrics found
+            output["submission_result"] = {
+                "ATE": None,
+                "RTE": None,
+                "LE": None
+            }
+    else:
+        output["submission_result"] = {} # Handle case with no evaluated metrics
+
     # Placeholder for submission metadata based on the requested format.
     # Actual values should be populated based on evaluation results if applicable.
     output["submission_metadata"] = json.dumps(kwargs['submission_metadata'])
