
Commit 43c66e7

feat: improving logging coverage at main decision points
1 parent b804e41 commit 43c66e7

6 files changed: +170 -39 lines changed


src/earth_reach/cli.py

Lines changed: 34 additions & 28 deletions

@@ -210,15 +210,11 @@ def generate(
             ValueError: If arguments are invalid or conflicting
             RuntimeError: If description generation fails
         """
+        logger.info("Starting description generation...")
         try:
-            if verbose:
-                logger.info(f"Validating image: {image_path}")
             validated_image_path = validate_image_path(image_path)
             image = Image.open(validated_image_path)

-            if verbose:
-                logger.info("Resolving prompts...")
-
             system_prompt_text = resolve_prompt(
                 system_prompt,
                 system_prompt_file_path,

@@ -237,16 +233,23 @@ def generate(
             if verbose:
                 if system_prompt_text:
                     logger.info(
-                        f"System prompt length: {len(system_prompt_text)} characters",
+                        "System prompt length: %d characters", len(system_prompt_text)
                     )

                 if user_prompt_text:
                     logger.info(
-                        f"User prompt length: {len(user_prompt_text)} characters",
+                        "User prompt length: %d characters", len(user_prompt_text)
                     )

-            if verbose:
-                logger.info("Initializing LLM...")
+            logger.debug(
+                "CLI configuration for generation",
+                extra={
+                    "provider": os.getenv("LLM_PROVIDER", "groq"),
+                    "simple_mode": simple,
+                    "max_iterations": max_iterations,
+                    "criteria_threshold": criteria_threshold,
+                },
+            )
             llm = create_llm()

             if verbose:

@@ -278,7 +281,7 @@ def generate(
             )

             if verbose:
-                logger.info(f"Generating description for: {validated_image_path.name}")
+                logger.info("Generating description for: %s", validated_image_path.name)

             if simple:
                 description = generator.generate(

@@ -290,21 +293,21 @@ def generate(

             if verbose and isinstance(description, str):
                 logger.info("Description generated successfully!")
-                logger.info(f"Description length: {len(description)} characters")
+                logger.info("Description length: %d characters", len(description))
                 logger.info("-" * 50)

             print(description)

             return

         except (OSError, FileNotFoundError, ValueError) as e:
-            logger.error(f"Could not load image file: {e}", exc_info=True)
+            logger.error("Could not load image file: %s", e, exc_info=True)
             sys.exit(1)
         except RuntimeError as e:
-            logger.error(f"Generation failed: {e}", exc_info=True)
+            logger.error("Generation failed: %s", e, exc_info=True)
             sys.exit(1)
         except Exception as e:
-            logger.error(f"Unexpected error: {e}", exc_info=True)
+            logger.error("Unexpected error: %s", e, exc_info=True)
             sys.exit(1)

     @staticmethod

@@ -333,19 +336,14 @@ def evaluate(
             ValueError: If arguments are invalid or conflicting
             RuntimeError: If evaluation fails
         """
+        logger.info("Starting description evaluation...")
+
         if criteria is None:
             criteria = ["coherence", "fluency", "consistency", "relevance"]
-
         try:
-            if verbose:
-                logger.info(f"Validating image: {image_path}")
-
             validated_image_path = validate_image_path(image_path)
             image = Image.open(validated_image_path)

-            if verbose:
-                logger.info("Resolving description...")
-
             description_text = resolve_description(
                 description,
                 description_file_path,

@@ -356,7 +354,7 @@ def evaluate(
             )

             if verbose:
-                logger.info(f"Description length: {len(description_text)} characters")
+                logger.info("Description length: %d characters", len(description_text))

             if not criteria or len(criteria) == 0:
                 raise ValueError("Criteria list cannot be empty.")

@@ -369,10 +367,18 @@ def evaluate(
             )

             if verbose:
-                logger.info(f"Evaluation criteria: {', '.join(criteria)}")
+                logger.info("Evaluation criteria: %s", ", ".join(criteria))

             if verbose:
                 logger.info("Initializing LLM...")
+
+            logger.debug(
+                "CLI configuration for evaluation",
+                extra={
+                    "provider": os.getenv("LLM_PROVIDER", "groq"),
+                    "criteria": criteria,
+                },
+            )
             llm = create_llm()

             if verbose:

@@ -383,15 +389,15 @@ def evaluate(
             )

             if verbose:
-                logger.info(f"Evaluating description for: {validated_image_path.name}")
+                logger.info("Evaluating description for: %s", validated_image_path.name)
             evaluation = evaluator.evaluate(
                 description=description_text,
                 image=image,
             )

             if verbose:
                 logger.info("Evaluation completed successfully!")
-                logger.info(f"Number of criteria evaluated: {len(evaluation)}")
+                logger.info("Number of criteria evaluated: %d", len(evaluation))
                 logger.info("-" * 50)

             for eval in evaluation:

@@ -404,13 +410,13 @@ def evaluate(
             return

         except FileNotFoundError as e:
-            logger.error(f"File not found: {e}", exc_info=True)
+            logger.error("File not found: %s", e, exc_info=True)
             sys.exit(1)
         except ValueError as e:
-            logger.error(f"Invalid input: {e}", exc_info=True)
+            logger.error("Invalid input: %s", e, exc_info=True)
             sys.exit(1)
         except Exception as e:
-            logger.error(f"Evaluation failed: {e}", exc_info=True)
+            logger.error("Evaluation failed: %s", e, exc_info=True)
             sys.exit(1)
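Note on the pattern above: the f-string log calls become %-style arguments, and the start-of-command messages move out of the verbose-only branches. A minimal sketch of the difference, using only the standard-library logging module (the project's get_logger wrapper is assumed to return an ordinary logging.Logger, which this diff does not show):

import logging

logging.basicConfig(level=logging.WARNING, format="%(levelname)s %(name)s: %(message)s")
logger = logging.getLogger("earth_reach.cli")

description = "A mid-latitude cyclone dominates the North Atlantic."

# f-string: the full message (the len() call plus the string interpolation) is built
# here, even though INFO records are filtered out at WARNING level.
logger.info(f"Description length: {len(description)} characters")

# %-style arguments: the argument is still evaluated, but the formatting is deferred
# until a handler actually emits the record, and the constant template
# "Description length: %d characters" stays intact for log aggregation.
logger.info("Description length: %d characters", len(description))

Either way nothing is printed at WARNING level; the difference is that the second form skips the interpolation work and keeps a stable message template.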

src/earth_reach/core/evaluator.py

Lines changed: 10 additions & 1 deletion

@@ -83,7 +83,6 @@ def evaluate(
                 "Only one of 'figure' or 'image' can be provided, not both.",
             )
         if figure is not None:
-            # TODO(medium): If metadata extraction fails, continue without it
             metadata = self._get_metadata_from_figure(figure)
             self.user_prompt = self._update_user_prompt_with_metadata(
                 self.user_prompt,

@@ -409,13 +408,23 @@ def evaluate(
         try:
             evaluations = []
             for evaluator in self.evaluators:
+                logger.debug("Evaluating criterion: %s", evaluator.criterion)
                 result = evaluator.evaluate(
                     description=description,
                     figure=figure,
                     image=image,
                 )
+                logger.debug(
+                    "Criterion evaluation completed",
+                    extra={
+                        "criterion": result.name,
+                        "score": result.score,
+                        "max_score": 5,
+                    },
+                )
                 evaluations.append(result)

+            logger.info("Evaluator successfully evaluated the description")
             return evaluations
         except Exception as e:
             raise RuntimeError(f"Failed to evaluate description: {e}") from e

src/earth_reach/core/generator.py

Lines changed: 23 additions & 1 deletion

@@ -15,8 +15,11 @@
 from PIL import Image
 from PIL.ImageFile import ImageFile

+from earth_reach.config.logging import get_logger
 from earth_reach.core.llm import LLMInterface

+logger = get_logger(__name__)
+

 @dataclass
 class FigureMetadata:

@@ -164,7 +167,6 @@ def generate(
                 "Only one of 'figure' or 'image' can be provided, not both.",
             )
         if figure is not None:
-            # TODO(medium): If metadata extraction fails at any point, generate image and continue without figure metadata
             metadata = self._get_metadata_from_figure(figure)
             self.user_prompt = self._update_user_prompt_with_metadata(
                 self.user_prompt,

@@ -183,8 +185,16 @@ def generate(
                     image=image,
                 )

+                logger.debug("Parsing LLM response for structured output")
                 parsed_output = self.parse_llm_response(response)
                 if not parsed_output.is_complete():
+                    logger.warning(
+                        "LLM response parsing incomplete",
+                        extra={
+                            "missing_fields": parsed_output.get_missing_fields(),
+                            "total_fields": len(list(fields(parsed_output))),
+                        },
+                    )
                     raise ValueError(
                         "Parsed output is incomplete. Missing fields: "
                         f"{parsed_output.get_missing_fields()}",

@@ -200,6 +210,7 @@ def generate(
         except Exception as e:
             raise RuntimeError(f"Failed to generate response: {e}") from e

+        logger.info("Generator successfully generated a description")
         return description

     def parse_llm_response(self, response: str) -> GeneratorOutput:

@@ -261,6 +272,17 @@ def _get_metadata_from_figure(self, figure: ekp.Figure) -> FigureMetadata:
             metadata.xlabel = axes[0].get_xlabel()
             metadata.ylabel = axes[0].get_ylabel()
             metadata.domain = figure._domain
+
+            logger.debug(
+                "Extracted metadata from figure",
+                extra={
+                    "title": metadata.title or "None",
+                    "domain": metadata.domain or "None",
+                    "xlabel": metadata.xlabel or "None",
+                    "ylabel": metadata.ylabel or "None",
+                },
+            )
+
             return metadata

     def _update_user_prompt_with_metadata(
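generator.py now builds a module-level logger from earth_reach.config.logging.get_logger, mirroring llm.py below. That helper is not shown in this commit; the sketch below is a hypothetical implementation of such a wrapper (the LOG_LEVEL variable and the "earth_reach" package-root logger name are assumptions, not taken from the repository):

import logging
import os

def get_logger(name: str) -> logging.Logger:
    """Return a namespaced logger; configure the package root exactly once per process."""
    package_root = logging.getLogger("earth_reach")
    if not package_root.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s")
        )
        package_root.addHandler(handler)
        package_root.setLevel(os.getenv("LOG_LEVEL", "INFO"))
    return logging.getLogger(name)

logger = get_logger("earth_reach.core.generator")  # equivalent of get_logger(__name__)
logger.info("Generator successfully generated a description")

Because child loggers propagate records upward, every module-level get_logger(__name__) call would share the single handler configured on the package root.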

src/earth_reach/core/llm.py

Lines changed: 64 additions & 4 deletions

@@ -16,12 +16,20 @@
 from google.genai import types
 from PIL.ImageFile import ImageFile

+from earth_reach.config.logging import get_logger
 from earth_reach.core.utils import img_to_base64, img_to_bytes

+logger = get_logger(__name__)
+

 class LLMInterface(ABC):
     """Abstract base class defining the interface for all LLM provider implementations."""

+    @property
+    @abstractmethod
+    def provider_name(self) -> str:
+        """Must be implemented by subclasses to return the provider name."""
+
     @abstractmethod
     def generate(
         self,

@@ -73,6 +81,10 @@ def __init__(
             api_key=api_key,
         )

+    @property
+    def provider_name(self):
+        return "openAICompatible"
+
     def generate(
         self,
         user_prompt: str,

@@ -139,13 +151,31 @@ def generate(
                     "The generated response content is empty or not a string"
                 )

+            logger.info(
+                "LLM API call completed successfully",
+                extra={
+                    "provider": self.provider_name,
+                    "model": self.model_name,
+                    "input_length": len(user_prompt),
+                    "output_length": len(content),
+                    "has_image": image is not None,
+                },
+            )
+
             return content.strip()

         except ValueError:
             raise
         except Exception as e:
-            error_msg = f"API call failed: {type(e).__name__}: {e}"
-            raise RuntimeError(error_msg) from e
+            logger.error(
+                "LLM API call failed",
+                extra={
+                    "provider": self.provider_name,
+                    "model": self.model_name,
+                },
+                exc_info=True,
+            )
+            raise RuntimeError("LLM API call failed") from e

     def __repr__(self) -> str:
         return f"LLM(model_name={self.model_name}, base_url={self.base_url})"

@@ -180,6 +210,10 @@ def __init__(self, model_name: str, api_key: str | None = None) -> None:
             base_url="https://api.groq.com/openai/v1",
         )

+    @property
+    def provider_name(self):
+        return "groq"
+

 class OpenAILLM(OpenAICompatibleLLM):
     """Implementation of the LLMInterface for OpenAI API Provider."""

@@ -210,6 +244,10 @@ def __init__(self, model_name: str, api_key: str | None = None) -> None:
             base_url="https://api.openai.com/v1",
         )

+    @property
+    def provider_name(self):
+        return "openAI"
+

 class GeminiLLM(LLMInterface):
     """Implementation of the LLMInterface for Google Gemini API Provider."""

@@ -236,6 +274,10 @@ def __init__(self, model_name: str, api_key: str | None = None) -> None:
         self.api_key = api_key
         self.client = genai.Client(api_key=api_key)

+    @property
+    def provider_name(self):
+        return "gemini"
+
     def generate(
         self,
         user_prompt: str,

@@ -296,13 +338,31 @@ def generate(
                     "The generated response content is empty or not a string"
                 )

+            logger.info(
+                "LLM API call completed successfully",
+                extra={
+                    "provider": self.provider_name,
+                    "model": self.model_name,
+                    "input_length": len(user_prompt),
+                    "output_length": len(content),
+                    "has_image": image is not None,
+                },
+            )
+
             return content.strip()

         except ValueError:
             raise
         except Exception as e:
-            error_msg = f"API call failed: {type(e).__name__}: {e}"
-            raise RuntimeError(error_msg) from e
+            logger.error(
+                "LLM API call failed",
+                extra={
+                    "provider": self.provider_name,
+                    "model": self.model_name,
+                },
+                exc_info=True,
+            )
+            raise RuntimeError("LLM API call failed") from e

     def __repr__(self) -> str:
         return f"GeminiLLM(model_name={self.model_name})"
