Skip to content

Commit 6d3091d

Browse files
committed
metadata and submission results
1 parent 9912a41 commit 6d3091d

File tree

4 files changed

+90
-97
lines changed

challenge_data/challenge_1/main.py

Lines changed: 90 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import os
22
import io
33
import zipfile
4+
import json
45
print("Starting Evaluation.....")
56
print("Starting Evaluation.....")
67
print("Starting Evaluation.....")
@@ -190,62 +191,99 @@ def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwarg
190191
# you might need to aggregate or select specific results.
191192
# For now, let's keep the first split's results for submission_result as an example.
192193
if output["result"]:
193-
first_split_key = list(output["result"][0].keys())[0]
194-
output["submission_result"] = output["result"][0][first_split_key]
194+
total_ate = 0
195+
total_rte = 0
196+
total_le = 0
197+
count = 0
198+
199+
for split_result in output["result"]:
200+
# Each split_result is a dict like {"split_name": {"ATE": x, "RTE": y, "LE": z}}
201+
# Get the inner metrics dictionary (assuming only one key per outer dict)
202+
split_name = list(split_result.keys())[0]
203+
metrics = split_result[split_name]
204+
205+
# Accumulate metrics if they exist and are not None
206+
if metrics.get("ATE") is not None:
207+
total_ate += metrics["ATE"]
208+
if metrics.get("RTE") is not None:
209+
total_rte += metrics["RTE"]
210+
if metrics.get("LE") is not None:
211+
total_le += metrics["LE"]
212+
count += 1 # Increment count for each split processed
213+
214+
# Calculate averages, handle division by zero if count is 0
215+
if count > 0:
216+
avg_ate = total_ate / count
217+
avg_rte = total_rte / count
218+
avg_le = total_le / count
219+
output["submission_result"] = {
220+
"ATE": avg_ate,
221+
"RTE": avg_rte,
222+
"LE": avg_le
223+
}
224+
else:
225+
# Handle case with no valid metrics found
226+
output["submission_result"] = {
227+
"ATE": None,
228+
"RTE": None,
229+
"LE": None
230+
}
195231
else:
196232
output["submission_result"] = {} # Handle case with no evaluated metrics
197233
# Placeholder for submission metadata based on the requested format.
198234
# Actual values should be populated based on evaluation results if applicable.
199-
output["submission_metadata"] = {
200-
"heap": {
201-
"metrics": {
202-
"time": 5, # Replace with actual accuracy
203-
"length": 10
204-
# Add more qid: acc pairs as needed
205-
},
206-
"logs": {
207-
"rate": 100, # Replace with actual accuracy
208-
"suspicious": "no"
209-
# Add more qtype: acc pairs as needed
210-
}
211-
},
212-
"eiger": {
213-
"metrics": {
214-
"time": 5, # Replace with actual accuracy
215-
"length": 10
216-
# Add more qid: acc pairs as needed
217-
},
218-
"logs": {
219-
"rate": 100, # Replace with actual accuracy
220-
"suspicious": "no"
221-
# Add more qtype: acc pairs as needed
222-
}
223-
},
224-
"tt3": {
225-
"metrics": {
226-
"time": 5, # Replace with actual accuracy
227-
"length": 10
228-
# Add more qid: acc pairs as needed
229-
},
230-
"logs": {
231-
"rate": 100, # Replace with actual accuracy
232-
"suspicious": "no"
233-
# Add more qtype: acc pairs as needed
234-
}
235-
},
236-
"tt4": {
237-
"metrics": {
238-
"time": 5, # Replace with actual accuracy
239-
"length": 10
240-
# Add more qid: acc pairs as needed
241-
},
242-
"logs": {
243-
"rate": 100, # Replace with actual accuracy
244-
"suspicious": "no"
245-
# Add more qtype: acc pairs as needed
246-
}
247-
}
248-
}
235+
output["submission_metadata"] = json.dumps(kwargs['submission_metadata'])
236+
237+
# output["submission_metadata"] = {
238+
# "heap": {
239+
# "metrics": {
240+
# "time": 5, # Replace with actual accuracy
241+
# "length": 10
242+
# # Add more qid: acc pairs as needed
243+
# },
244+
# "logs": {
245+
# "rate": 100, # Replace with actual accuracy
246+
# "suspicious": "no"
247+
# # Add more qtype: acc pairs as needed
248+
# }
249+
# },
250+
# "eiger": {
251+
# "metrics": {
252+
# "time": 5, # Replace with actual accuracy
253+
# "length": 10
254+
# # Add more qid: acc pairs as needed
255+
# },
256+
# "logs": {
257+
# "rate": 100, # Replace with actual accuracy
258+
# "suspicious": "no"
259+
# # Add more qtype: acc pairs as needed
260+
# }
261+
# },
262+
# "tt3": {
263+
# "metrics": {
264+
# "time": 5, # Replace with actual accuracy
265+
# "length": 10
266+
# # Add more qid: acc pairs as needed
267+
# },
268+
# "logs": {
269+
# "rate": 100, # Replace with actual accuracy
270+
# "suspicious": "no"
271+
# # Add more qtype: acc pairs as needed
272+
# }
273+
# },
274+
# "tt4": {
275+
# "metrics": {
276+
# "time": 5, # Replace with actual accuracy
277+
# "length": 10
278+
# # Add more qid: acc pairs as needed
279+
# },
280+
# "logs": {
281+
# "rate": 100, # Replace with actual accuracy
282+
# "suspicious": "no"
283+
# # Add more qtype: acc pairs as needed
284+
# }
285+
# }
286+
# }
249287

250288
print("Completed evaluation for Dev Phase")
251289

evaluation_script/evo_ape_fork.py

Lines changed: 0 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121
along with evo. If not, see <http://www.gnu.org/licenses/>.
2222
"""
2323

24-
import logging
2524
import typing
2625

2726
import numpy as np
@@ -31,10 +30,6 @@
3130
from evo.core.trajectory import PosePath3D, PoseTrajectory3D, Plane
3231
from evo.tools.settings import SETTINGS
3332

34-
logger = logging.getLogger(__name__)
35-
36-
SEP = "-" * 80 # separator line
37-
3833

3934
def ape(traj_ref: PosePath3D, traj_est: PosePath3D,
4035
pose_relation: metrics.PoseRelation, align: bool = False,
@@ -48,23 +43,17 @@ def ape(traj_ref: PosePath3D, traj_est: PosePath3D,
4843
only_scale = correct_scale and not align
4944
alignment_transformation = None
5045
if align or correct_scale:
51-
logger.debug(SEP)
5246
alignment_transformation = lie_algebra.sim3(
5347
*traj_est.align(traj_ref, correct_scale, only_scale, n=n_to_align))
5448
if align_origin:
55-
logger.debug(SEP)
5649
alignment_transformation = traj_est.align_origin(traj_ref)
5750

5851
# Projection is done after potential 3D alignment & transformation steps.
5952
if project_to_plane:
60-
logger.debug(SEP)
61-
logger.debug("Projecting trajectories to %s plane.",
62-
project_to_plane.value)
6353
traj_ref.project(project_to_plane)
6454
traj_est.project(project_to_plane)
6555

6656
# Calculate APE.
67-
logger.debug(SEP)
6857
data = (traj_ref, traj_est)
6958
ape_metric = metrics.APE(pose_relation)
7059
ape_metric.process_data(data)
@@ -92,9 +81,6 @@ def ape(traj_ref: PosePath3D, traj_est: PosePath3D,
9281
ape_result = ape_metric.get_result(ref_name, est_name)
9382
ape_result.info["title"] = title
9483

95-
logger.debug(SEP)
96-
logger.info(ape_result.pretty_str())
97-
9884
ape_result.add_trajectory(ref_name, traj_ref)
9985
ape_result.add_trajectory(est_name, traj_est)
10086
if isinstance(traj_est, PoseTrajectory3D):
@@ -110,8 +96,3 @@ def ape(traj_ref: PosePath3D, traj_est: PosePath3D,
11096
alignment_transformation)
11197

11298
return ape_result
113-
114-
115-
# if __name__ == '__main__':
116-
# from evo import entry_points
117-
# entry_points.ape()

evaluation_script/evo_rpe_fork.py

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121
along with evo. If not, see <http://www.gnu.org/licenses/>.
2222
"""
2323

24-
import logging
2524
import typing
2625

2726
import numpy as np
@@ -30,10 +29,6 @@
3029
from evo.core.trajectory import PosePath3D, PoseTrajectory3D, Plane
3130
from evo.tools.settings import SETTINGS
3231

33-
logger = logging.getLogger(__name__)
34-
35-
SEP = "-" * 80 # separator line
36-
3732

3833
def rpe(traj_ref: PosePath3D, traj_est: PosePath3D,
3934
pose_relation: metrics.PoseRelation, delta: float,
@@ -49,23 +44,17 @@ def rpe(traj_ref: PosePath3D, traj_est: PosePath3D,
4944
only_scale = correct_scale and not align
5045
alignment_transformation = None
5146
if align or correct_scale:
52-
logger.debug(SEP)
5347
alignment_transformation = lie_algebra.sim3(
5448
*traj_est.align(traj_ref, correct_scale, only_scale, n=n_to_align))
5549
if align_origin:
56-
logger.debug(SEP)
5750
alignment_transformation = traj_est.align_origin(traj_ref)
5851

5952
# Projection is done after potential 3D alignment & transformation steps.
6053
if project_to_plane:
61-
logger.debug(SEP)
62-
logger.debug("Projecting trajectories to %s plane.",
63-
project_to_plane.value)
6454
traj_ref.project(project_to_plane)
6555
traj_est.project(project_to_plane)
6656

6757
# Calculate RPE.
68-
logger.debug(SEP)
6958
data = (traj_ref, traj_est)
7059
rpe_metric = metrics.RPE(pose_relation, delta, delta_unit, rel_delta_tol,
7160
all_pairs, pairs_from_reference)
@@ -93,8 +82,6 @@ def rpe(traj_ref: PosePath3D, traj_est: PosePath3D,
9382

9483
rpe_result = rpe_metric.get_result(ref_name, est_name)
9584
rpe_result.info["title"] = title
96-
logger.debug(SEP)
97-
logger.info(rpe_result.pretty_str())
9885

9986
# Restrict trajectories to delta ids for further processing steps.
10087
if support_loop:

evaluation_script/evo_script.py

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,14 @@
11
import io
2-
import os
3-
import tempfile
42
from pathlib import Path
53
from typing import Dict, IO, Optional, Union, List
64
import numpy as np
7-
import pandas as pd
85
import yaml
96
import csv
107
from evo.core import sync
118
from evo.core.trajectory import PoseTrajectory3D
129
from evo.core.trajectory import Plane
1310
from evo.core.metrics import PoseRelation, Unit
1411
# from evo.tools import file_interface
15-
# import evo.main_ape as main_ape
16-
# import evo.main_rpe as main_rpe
1712
from .evo_ape_fork import ape
1813
from .evo_rpe_fork import rpe
1914
class FileInterfaceException(Exception):
@@ -159,14 +154,6 @@ def update_config(self, cfg: Union[str, Dict]) -> None:
159154
raise TypeError("config must be dict or path to YAML/JSON file")
160155
self.config.update(cfg)
161156

162-
# ------------------------------------------------------------------
163-
# Main entry point
164-
# ------------------------------------------------------------------
165-
166-
# ------------------------------------------------------------------
167-
# Internal implementation
168-
# ------------------------------------------------------------------
169-
170157
# -- compute --------------------------------------------------------
171158

172159
def evaluate(self, traj_ref: str, traj_est: str) -> Dict[str, float]:

0 commit comments

Comments (0)