Skip to content

Commit 2997e73

Browse files
committed
Adds logging configuration and use.
1 parent e5b9611 commit 2997e73

File tree

6 files changed

+31
-13
lines changed

6 files changed

+31
-13
lines changed

README.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,11 @@ starting point. Then set the `OPENAI_API_KEY` variable to a valid OpenAI API key
2020
enable that service. Or, otherwise set that variable the appropriate way when
2121
deploying the service.
2222

23+
To control the logging information, use the `LOG_LEVEL` configuration parameter. Set
24+
to `DEBUG`, `INFO`, `WARNING`, `ERROR`, or `CRITICAL`. The `DEBUG` setting is the
25+
most permissive and shows all logging text. The `CRITICAL` setting prevents most logging
26+
from happening. Most logging happens at `INFO`, which is the default setting.
27+
2328
## Local Development
2429

2530
All of our server code is written using [Flask](https://flask.palletsprojects.com/en/2.3.x/).

config.txt.sample

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1 +1,2 @@
11
OPENAI_API_KEY=
2+
LOG_LEVEL=INFO

lib/assessment/assess.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
import concurrent.futures
55
import io
66
import json
7+
import logging
78

89
# Import our support classes
910
from lib.assessment.config import SUPPORTED_MODELS, VALID_GRADES
@@ -32,10 +33,10 @@ def grade(code, prompt, rubric, api_key='', llm_model='gpt-4', num_responses=1,
3233
if OPENAI_API_KEY:
3334
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
3435
elif not 'OPENAI_API_KEY' in os.environ:
35-
print("Must set OPENAI_API_KEY!")
36+
logging.error("Must set OPENAI_API_KEY!")
3637
return {}
3738
else:
38-
print("Using set OPENAI_API_KEY")
39+
logging.info("Using set OPENAI_API_KEY")
3940

4041
grade = Grade()
4142
return grade.grade_student_work(

lib/assessment/grade.py

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
import csv
44
import time
55
import requests
6+
import logging
67

78
from typing import List, Dict, Any
89
from lib.assessment.config import VALID_GRADES
@@ -42,18 +43,18 @@ def grade_student_work(self, prompt, rubric, student_code, student_id, examples=
4243
try:
4344
response = requests.post(api_url, headers=headers, json=data, timeout=120)
4445
except requests.exceptions.ReadTimeout:
45-
print(f"{student_id} request timed out in {(time.time() - start_time):.0f} seconds.")
46+
logging.error(f"{student_id} request timed out in {(time.time() - start_time):.0f} seconds.")
4647
return None
4748

4849
if response.status_code != 200:
49-
print(f"{student_id} Error calling the API: {response.status_code}")
50-
print(f"{student_id} Response body: {response.text}")
50+
logging.error(f"{student_id} Error calling the API: {response.status_code}")
51+
logging.info(f"{student_id} Response body: {response.text}")
5152
return None
5253

5354
info = response.json()
5455
tokens = info['usage']['total_tokens']
5556
elapsed = time.time() - start_time
56-
print(f"{student_id} request succeeded in {elapsed:.0f} seconds. {tokens} tokens used.")
57+
logging.info(f"{student_id} request succeeded in {elapsed:.0f} seconds. {tokens} tokens used.")
5758

5859
tsv_data_choices = [self.get_tsv_data_if_valid(choice['message']['content'], rubric, student_id, choice_index=index) for index, choice in enumerate(info['choices']) if choice['message']['content']]
5960
tsv_data_choices = [choice for choice in tsv_data_choices if choice]
@@ -107,7 +108,7 @@ def compute_messages(self, prompt, rubric, student_code, examples=[]):
107108
def get_tsv_data_if_valid(self, response_text, rubric, student_id, choice_index=None):
108109
choice_text = f"Choice {choice_index}: " if choice_index is not None else ''
109110
if not response_text:
110-
print(f"{student_id} {choice_text} Invalid response: empty response")
111+
logging.error(f"{student_id} {choice_text} Invalid response: empty response")
111112
return None
112113
text = response_text.strip()
113114

@@ -131,12 +132,12 @@ def get_tsv_data_if_valid(self, response_text, rubric, student_id, choice_index=
131132
lines = text.split('\n')
132133
lines = list(filter(lambda x: "---" not in x, lines))
133134
text = "\n".join(lines)
134-
print("response was markdown and not tsv, delimiting by '|'")
135+
logging.info("response was markdown and not tsv, delimiting by '|'")
135136

136137
tsv_data = list(csv.DictReader(StringIO(text), delimiter='|'))
137138
else:
138139
# Let's assume it is CSV
139-
print("response had no tabs so is not tsv, delimiting by ','")
140+
logging.info("response had no tabs so is not tsv, delimiting by ','")
140141
tsv_data = list(csv.DictReader(StringIO(text), delimiter=','))
141142
else:
142143
# Let's assume it is TSV
@@ -147,7 +148,7 @@ def get_tsv_data_if_valid(self, response_text, rubric, student_id, choice_index=
147148
self.validate_server_response(tsv_data, rubric)
148149
return [row for row in tsv_data]
149150
except InvalidResponseError as e:
150-
print(f"{student_id} {choice_text} Invalid response: {str(e)}\n{response_text}")
151+
logging.error(f"{student_id} {choice_text} Invalid response: {str(e)}\n{response_text}")
151152
return None
152153

153154
def parse_tsv(self, tsv_text):
@@ -210,7 +211,7 @@ def get_consensus_response(self, choices, student_id):
210211
majority_grade = Counter(grades).most_common(1)[0][0]
211212
key_concept_to_majority_grade[key_concept] = majority_grade
212213
if majority_grade != grades[0]:
213-
print(f"outvoted {student_id} Key Concept: {key_concept} first grade: {grades[0]} majority grade: {majority_grade}")
214+
logging.info(f"outvoted {student_id} Key Concept: {key_concept} first grade: {grades[0]} majority grade: {majority_grade}")
214215

215216
key_concept_to_observations = {}
216217
key_concept_to_reason = {}

lib/assessment/rubric_tester.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
from multiprocessing import Pool
1010
import concurrent.futures
1111
import io
12+
import logging
1213

1314
from lib.assessment.config import SUPPORTED_MODELS, VALID_GRADES
1415
from lib.assessment.grade import Grade
@@ -194,7 +195,7 @@ def main():
194195
report.generate_html_output(
195196
output_file, prompt, rubric, overall_accuracy, actual_grades, expected_grades, options.passing_grades, accuracy_by_criteria, errors, command_line
196197
)
197-
print(f"main finished in {int(time.time() - main_start_time)} seconds")
198+
logging.info(f"main finished in {int(time.time() - main_start_time)} seconds")
198199

199200
os.system(f"open {output_file}")
200201

src/__init__.py

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,8 @@
11
# Main imports
2-
import os
2+
import os, sys
3+
4+
# Logging
5+
import logging
36

47
# Our modules
58
from src.test import test_routes
@@ -33,6 +36,12 @@ def create_app(test_config=None):
3336
except OSError:
3437
pass
3538

39+
# Set up logging
40+
log_level = os.getenv('LOG_LEVEL', 'INFO')
41+
logging.basicConfig(format='%(asctime)s: %(name)s:%(message)s', level=log_level)
42+
logging.log(100, f"Setting up application. Logging level={log_level}")
43+
logging.basicConfig(format='%(asctime)s: %(levelname)s:%(name)s:%(message)s', level=log_level)
44+
3645
# Index (a simple HTML response that will always succeed)
3746
@app.route('/')
3847
def root():

0 commit comments

Comments
 (0)