"""Evaluate question/answer pairs with the Gemini API (google.generativeai)."""

import os

import google.generativeai as genai
from dotenv import load_dotenv

from prompts.prompts import PromptGenerator

# Load environment variables (e.g. GOOGLE_API_KEY) from a local .env file.
load_dotenv()
class AnswerEvaluator:
    def __init__(self, api_key):
        """Initialize with the Gemini API key."""
        genai.configure(api_key=api_key)
        self.model = genai.GenerativeModel("gemini-pro")

    def evaluate_answers(self, all_answers):
        """Evaluates answers based on their relevance, accuracy, and clarity."""
        prompt = self._construct_prompt(all_answers)

        try:
            # Send the full evaluation prompt to the model in a single call.
            response = self.model.generate_content(prompt)
            # Return the model's evaluation text.
            return response.text.strip()
        except Exception as e:
            return f"Error in evaluation: {e}"

    def _construct_prompt(self, all_answers):
        """Constructs a detailed prompt for the Gemini model."""
        # PromptGenerator.evaluate_prompt() supplies the base evaluation
        # instructions; each question/answer pair is appended after it.
        prompt_generator = PromptGenerator()
        evaluation_prompt = prompt_generator.evaluate_prompt()

        for idx, item in enumerate(all_answers, 1):
            evaluation_prompt += f"\n{idx}. Question: {item['question']}\n   Answer: {item['answer']}\n"

        evaluation_prompt += "\nProvide your detailed evaluation below:"
        return evaluation_prompt
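
# PromptGenerator (prompts/prompts.py) is not shown in this file. A minimal,
# illustrative sketch of the interface assumed above: evaluate_prompt()
# returning the base instruction string that Q/A pairs are appended to.
#
#     class PromptGenerator:
#         def evaluate_prompt(self):
#             return ("Evaluate each answer below for relevance, "
#                     "accuracy, and clarity.\n")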


if __name__ == "__main__":
    # Initialize with your Gemini API key (read from the environment).
    api_key = os.getenv("GOOGLE_API_KEY")
    evaluator = AnswerEvaluator(api_key)

    # Example answers to evaluate.
    all_answers = [
        {"question": "What is machine learning?", "answer": "Machine learning is a subset of AI."},
        {"question": "Explain logistic regression.", "answer": "It is used to classify data points into classes."},
    ]

    # Evaluate the answers and print the model's assessment.
    evaluation_result = evaluator.evaluate_answers(all_answers)
    print(evaluation_result)
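
# A stricter variant would fail fast when the key is missing, rather than
# letting the API call error later, e.g.:
#
#     if not api_key:
#         raise RuntimeError("GOOGLE_API_KEY is not set; add it to your .env file.")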