# app.py
import os
import gradio as gr
from anthropic import Anthropic
from datetime import datetime, timedelta
from collections import deque

# Initialize the Anthropic client; it reads the API key stored as a HuggingFace Spaces secret
anthropic = Anthropic(
    api_key=os.environ.get('ANTHROPIC_API_KEY')
)

# Request tracking
MAX_REQUESTS_PER_DAY = 25  # Conservative limit to start
request_history = deque(maxlen=1000)
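# The deque stores request timestamps; maxlen bounds memory use, and
# check_rate_limit() drops entries older than 24 hours. The counter lives in
# memory only, so it resets whenever the Space process restarts.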

def check_api_key():
    """Verify API key is configured"""
    if not os.environ.get('ANTHROPIC_API_KEY'):
        raise ValueError("Anthropic API key not found. Please configure it in HuggingFace Spaces settings.")

def check_rate_limit():
    """Check if we're within rate limits"""
    now = datetime.now()
    # Remove requests older than 24 hours
    while request_history and (now - request_history[0]) > timedelta(days=1):
        request_history.popleft()
    return len(request_history) < MAX_REQUESTS_PER_DAY

def clean_latex(text):
    """Double newlines so each line renders as its own Markdown paragraph."""
    text = text.replace('\n', '\n\n')
    return text

def generate_test(subject):
    """Generate a math test with error handling and rate limiting"""
    try:
        # Check API key
        check_api_key()
        
        # Check rate limit
        if not check_rate_limit():
            return "Daily request limit reached. Please try again tomorrow."
        
        # Record request
        request_history.append(datetime.now())
        
        system_prompt = """You will write math exam questions. Follow these requirements EXACTLY:
        1. Write exactly 3 challenging university-level questions
        2. For LaTeX math formatting:
           - Use $ for simple inline math
           - For equations and solution steps, use $$ on separate lines
           - For multi-step solutions, put each step on its own line in $$ $$
           - DO NOT use \\begin{aligned} or any other environments
        3. Number each question as 1), 2), 3)
        4. Include solutions after each question
        5. Keep formatting simple and clear"""
        
        # Send the instructions via the API's dedicated system parameter instead
        # of prepending them to the user message.
        message = anthropic.messages.create(
            model="claude-3-opus-20240229",
            max_tokens=1500,
            temperature=0.7,
            system=system_prompt,
            messages=[{
                "role": "user",
                "content": f"Write an exam for {subject}."
            }]
        )
        
        # Extract usage information
        input_tokens = message.usage.input_tokens
        output_tokens = message.usage.output_tokens
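        # Published Claude 3 Opus rates at the time of writing: $15 per million
        # input tokens and $75 per million output tokens, i.e. $0.015 / $0.075
        # per 1K tokens.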
        input_cost = (input_tokens / 1000) * 0.015
        output_cost = (output_tokens / 1000) * 0.075
        total_cost = input_cost + output_cost
        
        # Build the usage summary without leading indentation so the Markdown
        # renderer does not treat the indented lines as a code block.
        usage_stats = f"""

---
Usage Statistics:
• Input Tokens: {input_tokens:,}
• Output Tokens: {output_tokens:,}
• Total Tokens: {input_tokens + output_tokens:,}

Cost Breakdown:
• Input Cost: ${input_cost:.4f}
• Output Cost: ${output_cost:.4f}
• Total Cost: ${total_cost:.4f}
"""
        
        if hasattr(message, 'content') and len(message.content) > 0:
            response_text = message.content[0].text
            formatted_response = clean_latex(response_text) + usage_stats
            return formatted_response
        else:
            return "Error: No content in response"
            
    except ValueError as e:
        return f"Configuration Error: {str(e)}"
    except Exception as e:
        return f"Error: {str(e)}"

# Subject choices
subjects = [
    "Single Variable Calculus",
    "Multivariable Calculus", 
    "Linear Algebra",
    "Differential Equations",
    "Real Analysis",
    "Complex Analysis",
    "Abstract Algebra",
    "Probability Theory",
    "Numerical Analysis",
    "Topology"
]

# Create Gradio interface
interface = gr.Interface(
    fn=generate_test,
    inputs=gr.Dropdown(
        choices=subjects,
        label="Select Mathematics Subject",
        info="Choose a subject for the exam questions"
    ),
    outputs=gr.Markdown(
        label="Generated Test",
        latex_delimiters=[
            {"left": "$$", "right": "$$", "display": True},
            {"left": "$", "right": "$", "display": False}
        ]
    ),
    title="Advanced Mathematics Test Generator",
    description="""Generates university-level mathematics exam questions with solutions using Claude 3 Opus.
    Limited to 25 requests per day. Please use responsibly.""",
    theme="default",
    allow_flagging="never"
)
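# The latex_delimiters above match the $ / $$ conventions the system prompt
# asks the model to use, so the generated LaTeX renders in the Markdown output.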

# Launch the interface
if __name__ == "__main__":
    interface.launch()
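
# To run outside HuggingFace Spaces (a sketch; the key below is a placeholder):
#   export ANTHROPIC_API_KEY=sk-ant-...
#   python app.py
# On Spaces, add ANTHROPIC_API_KEY as a repository secret so the client above
# can read it from the environment.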