diff --git a/src/ext/openai/openai.ts b/src/ext/openai/openai.ts
index 894baf5..f89453c 100644
--- a/src/ext/openai/openai.ts
+++ b/src/ext/openai/openai.ts
@@ -17,11 +17,11 @@ const logger = createLogger();
 
 const queryOpenAI = async (prompt: string): Promise<string> => {
   try {
-    const response = await openai.completions.create({
-      model: 'gpt-3.5-turbo-instruct',
-      prompt,
+    const response = await openai.chat.completions.create({
+      model: 'gpt-4o-mini',
+      messages: [{ role: 'user', content: prompt }],
       max_tokens: 250,
-      temperature: 0.7,
+      temperature: 0.2,
     });
 
     logger.debug(
@@ -29,7 +29,11 @@ const queryOpenAI = async (prompt: string): Promise<string> => {
       JSON.stringify(response, null, 2)
     );
 
-    const result = response.choices[0].text.trim();
+    const result = response.choices?.[0]?.message?.content?.trim() ?? '';
+
+    if (result === '') {
+      throw new Error('Empty response from OpenAI');
+    }
 
     return result;
   } catch (error) {
@@ -50,7 +54,7 @@ export const requestEvaluation = async (
   );
   const result = await queryOpenAI(prompt);
   logger.info('Application evaluation complete', { result });
-  return JSON.parse(result);
+  return JSON.parse(removeJsonCodeBlocks(result));
 };
 
 export const requestEvaluationQuestions = async (
@@ -60,8 +64,11 @@ export const requestEvaluationQuestions = async (
   const prompt: string = createEvaluationQuestionPrompt(roundMetadata);
   const result = await queryOpenAI(prompt);
   logger.info('Received evaluation questions from OpenAI', { result });
-  return result
-    .split('\n')
-    .map(line => line.replace(/^\d+\.\s*/, '').trim())
-    .filter(line => line.length > 0);
+  return JSON.parse(removeJsonCodeBlocks(result)).map(line =>
+    line.replace(/^\d+\.\s*/, '').trim()
+  );
+};
+
+const removeJsonCodeBlocks = (str: string): string => {
+  return str.replace(/```json/g, '').replace(/```/g, '');
 };
diff --git a/src/ext/openai/prompt.ts b/src/ext/openai/prompt.ts
index 9565dd1..074c426 100644
--- a/src/ext/openai/prompt.ts
+++ b/src/ext/openai/prompt.ts
@@ -42,42 +42,41 @@ const sanitizeRoundMetadata = (metadata: RoundMetadata): string => {
   return sanitizeAndReduceMetadata(metadata, essentialRoundFields, 1000);
 };
 
-const sanitizeApplicationMetadata = (metadata: ApplicationMetadata): string => {
-  return sanitizeAndReduceMetadata(metadata, essentialApplicationFields, 1000);
+const sanitizeApplicationMetadata = (metadata: object): string => {
+  return sanitizeAndReduceMetadata(metadata, essentialApplicationFields, 3000);
 };
 
-
 export const createAiEvaluationPrompt = (
   roundMetadata: RoundMetadata,
   applicationMetadata: ApplicationMetadata,
   applicationQuestions: PromptEvaluationQuestions
 ): string => {
   const sanitizedRoundMetadata = sanitizeRoundMetadata(roundMetadata);
-  const sanitizedApplicationMetadata =
-    sanitizeApplicationMetadata(applicationMetadata);
+  const sanitizedApplicationMetadata = sanitizeApplicationMetadata(
+    applicationMetadata.application.project
+  );
 
   const questionsString = applicationQuestions
     .map((q, index) => `${index + 1}. ${q}`)
     .join('\n');
 
   return `Evaluate the following application based on the round metadata and project metadata:
-  round: ${sanitizedRoundMetadata},
-  project: ${sanitizedApplicationMetadata}
-
-  Please answer the following questions to evaluate the application:
-  ${questionsString}
+    round: ${sanitizedRoundMetadata},
+    project: ${sanitizedApplicationMetadata}
 
-  Ensure that the project and application are not spam or scams and that the project looks legitimate. Please consider all available evidence and reasoning to assess legitimacy carefully. Avoid answering "yes" to all questions unless there is strong justification.
+    Please consider the following questions when evaluating the application and be very strict with your evaluation:
+    ${questionsString}
+    Your question answers should NOT be 'yes', 'no', or 'uncertain'. Please always use 0 for 'yes', 1 for 'no', and 2 for 'uncertain'.
   Please respond with ONLY the following JSON structure and NOTHING else:
   {
     "questions": [
      {
-        "questionIndex": number, // index of the question from the provided list (starting from 0)
-        "answerEnum": number, // 0 for "yes", 1 for "no", 2 for "uncertain"
+        "questionIndex": number, // index of the question from the provided list (IMPORTANT: starting from 0 to ${applicationQuestions.length - 1})
+        "answerEnum": number, // VERY IMPORTANT: 0 for "yes", 1 for "no", 2 for "uncertain"
      },
      ...
     ],
-    "summary": string // a detailed summary of the evaluation, including reasoning for 'yes', 'no', or 'uncertain' answers.
+    "summary": string // a detailed summary of the evaluation, including reasoning for 'yes', 'no', or 'uncertain' answers. Max 800 characters.
   }
   `;
 };
@@ -85,23 +84,14 @@ export const createAiEvaluationPrompt = (
 export const createEvaluationQuestionPrompt = (
   roundMetadata: RoundMetadata
 ): string => {
+  console.log('roundMetadata', roundMetadata);
   return `Given the following description of a Grants round, generate 5 evaluation questions that a reviewer can answer with 'Yes', 'No', or 'Uncertain'. The questions should help assess the projects in this round, focusing on the following key aspects:
 
-  - **Legitimacy**: Does the project demonstrate trustworthiness, transparency, or verifiable sources?
-  - **Alignment with Round Goals**: Does the project align with the specific goals and objectives of the round as described in the metadata?
-  - **Feasibility and Sustainability**: Is the project feasible and sustainable over time? Does it have a realistic plan for implementation and long-term success?
-  - **Impact**: Will the project have a measurable, positive impact on the target community or cause?
-  - **Sustainability**: Does the project have a plan for continued success or impact after the initial grant funding ends?
 
-  Ensure that each question is focused on one specific aspect of evaluation, is clear and concise, and can be answered with 'Yes', 'No', or 'Uncertain'. Also, avoid overly simplistic or binary questions unless they are critical for assessing eligibility. The questions should help a reviewer evaluate the project in a thoughtful, critical, and fair manner.
 
-  Grants Round Description: ${sanitizeRoundMetadata(roundMetadata)}
+  Please create the questions based on the eligibility description and requirements of the round.
 
-  Examples of Evaluation Questions (These should be ignored while creating the new questions, and are only to be considered as examples of format):
-  - Does the project have a clear and actionable plan for achieving its goals?
-  - Is there any evidence of the project's legitimacy, such as an established team or verifiable partners?
-  - Will the project have a measurable impact on the community it aims to serve?
-  - Does the project have a strategy for maintaining its impact over the long term?
+  Grants Round Description: ${sanitizeRoundMetadata(roundMetadata)}.
 
   Return the evaluation questions as a string array.`;
 };
diff --git a/src/ext/openai/types.ts b/src/ext/openai/types.ts
index 5edafbf..462fba0 100644
--- a/src/ext/openai/types.ts
+++ b/src/ext/openai/types.ts
@@ -1,4 +1,4 @@
-import { type ApplicationMetadata, type RoundMetadata } from '../indexer/types';
+import { type RoundMetadata } from '../indexer/types';
 
 export type PromptEvaluationQuestions = string[];
 
@@ -8,6 +8,4 @@ export const essentialRoundFields: Array<keyof RoundMetadata> = [
   'eligibility',
 ];
 
-export const essentialApplicationFields: Array<keyof ApplicationMetadata> = [
-  'application',
-];
+export const essentialApplicationFields = ['project', 'answers', 'description'];
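
Note on removeJsonCodeBlocks: the helper only strips the markdown fence
markers that chat models tend to wrap JSON in; JSON.parse tolerates the
leftover whitespace. A minimal sketch of the round trip, where the sample
`raw` reply is illustrative and not taken from the patch:

  // Copied from the patch: drop ```json opening fences and bare ``` fences.
  const removeJsonCodeBlocks = (str: string): string => {
    return str.replace(/```json/g, '').replace(/```/g, '');
  };

  // Hypothetical fenced reply in the shape the evaluation prompt requests.
  const raw =
    '```json\n{"questions":[{"questionIndex":0,"answerEnum":2}],"summary":"ok"}\n```';
  const parsed = JSON.parse(removeJsonCodeBlocks(raw));
  console.log(parsed.questions[0].answerEnum); // prints 2 ("uncertain")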
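
Note on typing: requestEvaluation and requestEvaluationQuestions still return
whatever JSON.parse yields (any). If stricter typing is wanted in a follow-up,
an interface mirroring the JSON structure the prompt demands could look like
this sketch (EvaluationAnswer and EvaluationResult are hypothetical names, not
part of this patch):

  interface EvaluationAnswer {
    questionIndex: number; // 0-based index into the supplied question list
    answerEnum: number; // 0 = "yes", 1 = "no", 2 = "uncertain"
  }

  interface EvaluationResult {
    questions: EvaluationAnswer[];
    summary: string; // the prompt caps this at 800 characters
  }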