ci: Add readability assessment to promptfoo GHA workflow #313
Merged
Commits (changes shown from 5 of 7 commits, all by fg-nava):

1779f26 feat: Add readability python assertion using TextDescriptives
d12b80d fix: path/to/ the readbility file
bf17db6 fix: only run python assertion for tagged questions
b007836 Merge branch 'main' into fg/add-readability-promptfoo
2cea951 fix: PR comment when event is pull_request
29b1b06 fix: add solutions to YLs comments
a1ba504 fix: add back required context param
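Promptfoo's file-based Python assertions import the referenced file and call its get_assert(output, context) hook; per commit bf17db6, this suite attaches the assertion only to tagged questions. As a local sanity check, one might exercise the hook directly, as in the sketch below. The module name readability_assert is an assumption (the file's actual path is elided in this diff), and the sketch presumes textdescriptives is installed and the en_core_web_sm spaCy model has been downloaded (python -m spacy download en_core_web_sm).

# Hypothetical local driver for the new assertion file shown below.
# The module name is assumed; the real path is not visible in this diff.
from readability_assert import get_assert

sample = "The program helps families pay for food. You can apply online in minutes."
result = get_assert(sample, context=None)  # promptfoo passes a context object; this hook never reads it
print(result["pass"], round(result["score"], 2))
for component in result["componentResults"]:
    print(component["reason"])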
@@ -0,0 +1,108 @@
from typing import Dict, Union, Any
import textdescriptives as td
import numpy as np


def get_assert(output: str, context) -> Union[bool, float, Dict[str, Any]]:
    """
    Assess the readability of the output text using TextDescriptives instead of py-readability-metrics.
    Returns a GradingResult with component scores for different readability metrics.
    """
    print("=== TEXTDESCRIPTIVES READABILITY ASSESSMENT STARTING ===")
    print(f"Output to assess: {output}")

    try:
        if not output or len(output.strip()) == 0:
            return {
                'pass': False,
                'score': 0.0,
                'reason': 'Empty or invalid output text'
            }

        # Use TextDescriptives to calculate readability metrics
        metrics_df = td.extract_metrics(
            text=output,
            spacy_model="en_core_web_sm",
            metrics=["readability"]
        )

        # Extract the readability metrics and convert from numpy types to Python native types
        flesch_reading_ease = float(metrics_df["flesch_reading_ease"].iloc[0])
        flesch_kincaid_grade = float(metrics_df["flesch_kincaid_grade"].iloc[0])
        gunning_fog = float(metrics_df["gunning_fog"].iloc[0])
        coleman_liau_index = float(metrics_df["coleman_liau_index"].iloc[0])

        # Set thresholds for readability
        MAX_GRADE_LEVEL = 12.0  # Maximum acceptable grade level (high school)
        MIN_FLESCH_EASE = 50.0  # Minimum acceptable Flesch Reading Ease score

        # Calculate average grade level from metrics
        grade_levels = [flesch_kincaid_grade, gunning_fog, coleman_liau_index]
        avg_grade_level = sum(grade_levels) / len(grade_levels)

        # Determine if the text passes readability requirements
        passes_grade_level = bool(avg_grade_level <= MAX_GRADE_LEVEL)
        passes_flesch_ease = bool(flesch_reading_ease >= MIN_FLESCH_EASE)

        # Calculate normalized score (0-1)
        grade_level_score = float(max(0, 1 - (avg_grade_level / (MAX_GRADE_LEVEL * 1.5))))
        flesch_ease_score = float(flesch_reading_ease / 100.0)

        # Overall score is average of both metrics
        overall_score = float((grade_level_score + flesch_ease_score) / 2)

        # Ensure all values are standard Python types, not numpy types
        def numpy_to_python(obj):
            if isinstance(obj, np.integer):
                return int(obj)
            elif isinstance(obj, np.floating):
                return float(obj)
            elif isinstance(obj, np.ndarray):
                return obj.tolist()
            elif isinstance(obj, np.bool_):
                return bool(obj)
            elif isinstance(obj, dict):
                return {k: numpy_to_python(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [numpy_to_python(i) for i in obj]
            else:
                return obj

        # Return comprehensive grading result
        result = {
            'pass': passes_grade_level and passes_flesch_ease,
            'score': overall_score,
            'reason': f'Readability assessment: Average grade level: {avg_grade_level:.1f}, Flesch ease: {flesch_reading_ease:.1f}',
            'componentResults': [
                {
                    'pass': passes_grade_level,
                    'score': grade_level_score,
                    'reason': f'Grade Level (target ≤ {MAX_GRADE_LEVEL}): {avg_grade_level:.1f}'
                },
                {
                    'pass': passes_flesch_ease,
                    'score': flesch_ease_score,
                    'reason': f'Flesch Reading Ease (target ≥ {MIN_FLESCH_EASE}): {flesch_reading_ease:.1f}'
                }
            ],
            'namedScores': {
                'flesch_kincaid_grade': flesch_kincaid_grade,
                'flesch_ease': flesch_reading_ease,
                'gunning_fog_grade': gunning_fog,
                'coleman_liau_grade': coleman_liau_index,
                'avg_grade_level': avg_grade_level
            }
        }

        # Convert any remaining numpy types to Python native types
        result = numpy_to_python(result)

        print("Assessment result:", result)
        return result

    except Exception as e:
        print(f"Error in readability assessment: {str(e)}")
        return {
            'pass': False,
            'score': 0.0,
            'reason': f'Error in readability assessment: {str(e)}'
        }
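The numpy_to_python helper (and the explicit float() casts) exist because the returned GradingResult is handed back to promptfoo's Node process as JSON, and numpy scalar types are not JSON-serializable. A minimal demonstration of the failure mode, assuming only numpy and the standard library:

import json
import numpy as np

score = np.float64(0.5)
try:
    json.dumps({"score": score})
except TypeError as err:
    print(err)  # Object of type float64 is not JSON serializable

# Converting to a native float first, as numpy_to_python does, succeeds:
print(json.dumps({"score": float(score)}))  # {"score": 0.5}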
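For reference, the four metrics the assertion thresholds and averages follow standard published formulas. The sketch below restates them; TextDescriptives computes them internally, and its tokenization and syllable counting may differ from naive counts, so treat this as illustrative rather than as the library's exact implementation.

def flesch_reading_ease(words: int, sentences: int, syllables: int) -> float:
    # Higher is easier; 50 (the MIN_FLESCH_EASE threshold) sits around "fairly difficult"
    return 206.835 - 1.015 * (words / sentences) - 84.6 * (syllables / words)

def flesch_kincaid_grade(words: int, sentences: int, syllables: int) -> float:
    return 0.39 * (words / sentences) + 11.8 * (syllables / words) - 15.59

def gunning_fog(words: int, sentences: int, complex_words: int) -> float:
    # complex_words: words with three or more syllables
    return 0.4 * ((words / sentences) + 100.0 * (complex_words / words))

def coleman_liau_index(letters: int, words: int, sentences: int) -> float:
    # L and S are letters and sentences per 100 words
    L = 100.0 * letters / words
    S = 100.0 * sentences / words
    return 0.0588 * L - 0.296 * S - 15.8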