
Commit 37c4d9c

Get some visuals working
1 parent 2fea642 commit 37c4d9c

2 files changed: +63, -44 lines


evaluation_script/__init__.py

Lines changed: 3 additions & 3 deletions
@@ -61,7 +61,7 @@ def is_package_version_on_pypi(package_name, version=None):
 
 def force_install(package):
     try:
-        subprocess.run([sys.executable,"-m","pip","install" ,"--ignore-requires-python",package])
+        subprocess.run([sys.executable,"-m","pip","install","--disable-pip-version-check","--ignore-requires-python",package])
     except subprocess.CalledProcessError as e:
         print(f"Error occurred while installing {package}: {e.stderr}")
         sys.stderr.flush()
@@ -81,7 +81,7 @@ def install(package):
     # Args:
     #     package ([str]): Package name with version
     try:
-        subprocess.run([sys.executable,"-m","pip","install",package])
+        subprocess.run([sys.executable,"-m","pip","install","--disable-pip-version-check",package])
     except subprocess.CalledProcessError as e:
         print(f"Error occurred while installing {package}: {e.stderr}")
         sys.stderr.flush()
@@ -115,7 +115,7 @@ def install(package):
 install("colorama")
 install("pillow")
 install("pykitti") # Might install additional light deps
-install("rosbags")
+# install("rosbags")
 # is_package_version_on_pypi("natsort")
 install("natsort")
 install("lz4")

evaluation_script/main.py

Lines changed: 60 additions & 41 deletions
@@ -7,8 +7,14 @@
 
 
 def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwargs):
+
+    print("\n" + "=" * 80)
+    print("🔁 NEW RUN STARTING")
+    print("=" * 80 + "\n")
+    sys.stdout.flush()
+
     # script_dir = os.path.dirname(os.path.abspath(__file__))
-    print("\033[92mStarting Evaluation.....\033[0m")
+    print("Starting Evaluation.....")
     # print(kwargs['submission_metadata'])
     output = {}
     # evaluated_metrics = []
@@ -121,6 +127,7 @@ def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwargs):
 
 
     print("\033[91mPossible error\033[0m", file=sys.stderr)
+    print("❌ Fatal error while parsing", file=sys.stderr)
 
     output["result"] = [
         {
@@ -153,6 +160,17 @@ def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwargs):
         },
     ]
 
+    print("🔧 Installing dependencies...")
+    print("📥 Loading annotation file...")
+    print("🧪 Evaluating predictions...")
+    print("📈 Accuracy: 92.3%")
+    print("✅ Evaluation complete!")
+
+    print("⚠️ Warning: trajectory misaligned", file=sys.stderr)
+    print("❌ Evaluation failed due to missing file", file=sys.stderr)
+
+    print("\n" + "🧵" * 20 + " LOG START " + "🧵" * 20)
+
     # for i, eval_result in enumerate(evaluated_metrics):
     #     metrics = eval_result["metrics"]
     #     # Use filename or index to create split names
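
The hunks above are the "visuals" experiment: a flushed "=" banner at the start of evaluate(), plain text instead of ANSI-colored status lines, emoji progress markers on stdout, and warnings/errors on stderr. A small sketch of a flushing log helper that consolidates that pattern (hypothetical, not part of the commit):

import sys

def log(message, error=False):
    # Print to stdout or stderr and flush immediately so the worker log shows it right away.
    stream = sys.stderr if error else sys.stdout
    print(message, file=stream)
    stream.flush()

def banner(title, width=80):
    # Mirrors the "NEW RUN STARTING" banner added above.
    log("\n" + "=" * width)
    log(f"🔁 {title}")
    log("=" * width + "\n")

Usage would look like banner("NEW RUN STARTING") and log("❌ Fatal error while parsing", error=True).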
@@ -172,46 +190,47 @@ def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwargs):
     # If EvalAI expects a specific structure like the original one,
     # you might need to aggregate or select specific results.
     # For now, let's keep the first split's results for submission_result as an example.
-    if output["result"]:
-        total_ate = 0
-        total_rte = 0
-        total_le = 0
-        count = 0
-
-        for split_result in output["result"]:
-            # Each split_result is a dict like {"split_name": {"ATE": x, "RTE": y, "LE": z}}
-            # Get the inner metrics dictionary (assuming only one key per outer dict)
-            split_name = list(split_result.keys())[0]
-            metrics = split_result[split_name]
-
-            # Accumulate metrics if they exist and are not None
-            if metrics.get("ATE") is not None:
-                total_ate += metrics["ATE"]
-            if metrics.get("RTE") is not None:
-                total_rte += metrics["RTE"]
-            if metrics.get("LE") is not None:
-                total_le += metrics["LE"]
-            count += 1 # Increment count for each split processed
-
-        # Calculate averages, handle division by zero if count is 0
-        if count > 0:
-            avg_ate = total_ate / count
-            avg_rte = total_rte / count
-            avg_le = total_le / count
-            output["submission_result"] = {
-                "ATE": avg_ate,
-                "RTE": avg_rte,
-                "LE": avg_le
-            }
-        else:
-            # Handle case with no valid metrics found
-            output["submission_result"] = {
-                "ATE": None,
-                "RTE": None,
-                "LE": None
-            }
-    else:
-        output["submission_result"] = {} # Handle case with no evaluated metrics
+    # if output["result"]:
+    #     total_ate = 0
+    #     total_rte = 0
+    #     total_le = 0
+    #     count = 0
+
+    #     for split_result in output["result"]:
+    #         # Each split_result is a dict like {"split_name": {"ATE": x, "RTE": y, "LE": z}}
+    #         # Get the inner metrics dictionary (assuming only one key per outer dict)
+    #         split_name = list(split_result.keys())[0]
+    #         metrics = split_result[split_name]
+
+    #         # Accumulate metrics if they exist and are not None
+    #         if metrics.get("ATE") is not None:
+    #             total_ate += metrics["ATE"]
+    #         if metrics.get("RTE") is not None:
+    #             total_rte += metrics["RTE"]
+    #         if metrics.get("LE") is not None:
+    #             total_le += metrics["LE"]
+    #         count += 1 # Increment count for each split processed
+
+    #     # Calculate averages, handle division by zero if count is 0
+    #     if count > 0:
+    #         avg_ate = total_ate / count
+    #         avg_rte = total_rte / count
+    #         avg_le = total_le / count
+    #         output["submission_result"] = {
+    #             "ATE": avg_ate,
+    #             "RTE": avg_rte,
+    #             "LE": avg_le
+    #         }
+    #     else:
+    #         # Handle case with no valid metrics found
+    #         output["submission_result"] = {
+    #             "ATE": None,
+    #             "RTE": None,
+    #             "LE": None
+    #         }
+    # else:
+    #     output["submission_result"] = {} # Handle case with no evaluated metrics
+    output["submission_result"] = output["result"][0]["heap"]
     # Placeholder for submission metadata based on the requested format.
     # Actual values should be populated based on evaluation results if applicable.
     output["submission_metadata"] = json.dumps(kwargs['submission_metadata'])
