vs12345 committed on
Commit
f8cac16
·
verified ·
1 Parent(s): 31138c5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +458 -0
app.py CHANGED
@@ -0,0 +1,458 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import random
import json

import gradio as gr
import google.generativeai as genai

# Configure Gemini API - For Hugging Face deployment.
# The key comes from a Space secret; only configure the SDK when it is
# actually present, so a missing key does not raise at import time and the
# UI can still start and display the "API Key Not Found" banner.
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
if GEMINI_API_KEY:
    genai.configure(api_key=GEMINI_API_KEY)
10
+
11
# Challenge database with different difficulty levels.
# Schema per challenge:
#   id             - unique id; prefix ("e"/"m"/"h") mirrors the difficulty bucket
#   title          - short display name shown in the UI
#   description    - task statement presented to the user
#   example_input  - one worked example input (string, shown in the prompt)
#   example_output - expected output for the example input
#   test_cases     - list of {"input", "output"} string pairs; these are sent
#                    to the LLM evaluator, not executed directly
challenges = {
    "easy": [
        {
            "id": "e1",
            "title": "Sum of Two Numbers",
            "description": "Write a function that takes two numbers as input and returns their sum.",
            "example_input": "5, 3",
            "example_output": "8",
            "test_cases": [
                {"input": "5, 3", "output": "8"},
                {"input": "10, -5", "output": "5"},
                {"input": "0, 0", "output": "0"}
            ]
        },
        {
            "id": "e2",
            "title": "Even or Odd",
            "description": "Write a function that determines if a number is even or odd.",
            "example_input": "4",
            "example_output": "Even",
            "test_cases": [
                {"input": "4", "output": "Even"},
                {"input": "7", "output": "Odd"},
                {"input": "0", "output": "Even"}
            ]
        },
        {
            "id": "e3",
            "title": "String Reversal",
            "description": "Write a function that reverses a string.",
            "example_input": "hello",
            "example_output": "olleh",
            "test_cases": [
                {"input": "hello", "output": "olleh"},
                {"input": "python", "output": "nohtyp"},
                {"input": "a", "output": "a"}
            ]
        }
    ],
    "medium": [
        {
            "id": "m1",
            "title": "Palindrome Check",
            "description": "Write a function that checks if a string is a palindrome (reads the same backward as forward).",
            "example_input": "racecar",
            "example_output": "True",
            "test_cases": [
                {"input": "racecar", "output": "True"},
                {"input": "hello", "output": "False"},
                {"input": "A man a plan a canal Panama", "output": "True"}
            ]
        },
        {
            "id": "m2",
            "title": "List Comprehension",
            "description": "Write a function that returns a list of all even numbers from 1 to n using list comprehension.",
            "example_input": "10",
            "example_output": "[2, 4, 6, 8, 10]",
            "test_cases": [
                {"input": "10", "output": "[2, 4, 6, 8, 10]"},
                {"input": "5", "output": "[2, 4]"},
                {"input": "1", "output": "[]"}
            ]
        },
        {
            "id": "m3",
            "title": "Fibonacci Sequence",
            "description": "Write a function that returns the nth number in the Fibonacci sequence.",
            "example_input": "6",
            "example_output": "8",
            "test_cases": [
                {"input": "6", "output": "8"},
                {"input": "1", "output": "1"},
                {"input": "10", "output": "55"}
            ]
        }
    ],
    "hard": [
        {
            "id": "h1",
            "title": "Anagram Check",
            "description": "Write a function that determines if two strings are anagrams of each other.",
            "example_input": "listen, silent",
            "example_output": "True",
            "test_cases": [
                {"input": "listen, silent", "output": "True"},
                {"input": "hello, world", "output": "False"},
                {"input": "Astronomer, Moon starer", "output": "True"}
            ]
        },
        {
            "id": "h2",
            "title": "Prime Number Generator",
            "description": "Write a function that generates all prime numbers up to n using the Sieve of Eratosthenes algorithm.",
            "example_input": "30",
            "example_output": "[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]",
            "test_cases": [
                {"input": "30", "output": "[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]"},
                {"input": "10", "output": "[2, 3, 5, 7]"},
                {"input": "2", "output": "[2]"}
            ]
        },
        {
            "id": "h3",
            "title": "Recursive Binary Search",
            "description": "Write a recursive function that performs binary search on a sorted list.",
            "example_input": "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 7",
            "example_output": "6",
            "test_cases": [
                {"input": "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 7", "output": "6"},
                {"input": "[1, 2, 3, 4, 5], 1", "output": "0"},
                {"input": "[1, 3, 5, 7, 9], 4", "output": "-1"}
            ]
        }
    ]
}
128
+
129
# User session data.
# NOTE(review): this is module-level mutable state, so it is shared by every
# visitor of the Space (single-session assumption) — confirm acceptable.
user_data = {
    "current_challenge": None,   # challenge dict currently being attempted, or None
    "difficulty_level": "easy",  # one of "easy" / "medium" / "hard"
    "correct_answers": 0,        # count of submissions judged correct
    "total_attempts": 0,         # count of all submissions
    "solution_history": []       # Store previous solutions for LLM analysis
}
137
+
138
def get_challenge():
    """Pick a random challenge at the current difficulty and remember it.

    Side effect: stores the picked challenge in user_data["current_challenge"]
    so handle_submission can evaluate against it later.
    """
    pool = challenges[user_data["difficulty_level"]]
    picked = random.choice(pool)
    user_data["current_challenge"] = picked
    return picked
145
+
146
def evaluate_code_with_gemini(user_code, challenge):
    """Evaluate the user's code using the Gemini API.

    Args:
        user_code: Python source code submitted by the user.
        challenge: Challenge dict (title, description, test_cases, ...).

    Returns:
        dict with keys: test_results, overall_assessment, feedback,
        is_correct, code_quality_score, algorithm_efficiency_score.
        A neutral fallback dict (is_correct=False, scores 5/10) is returned
        when the API key is missing, the model output is not valid JSON,
        or any other error occurs.
    """

    def _fallback(assessment, feedback):
        # Single source of truth for the neutral "could not evaluate" result
        # (the original repeated this dict three times).
        return {
            "test_results": [],
            "overall_assessment": assessment,
            "feedback": feedback,
            "is_correct": False,
            "code_quality_score": 5,
            "algorithm_efficiency_score": 5
        }

    try:
        # Check if API key is available
        if not GEMINI_API_KEY:
            return _fallback(
                "API Key Missing",
                "The Gemini API key is not configured. Please check the Hugging Face Space settings."
            )

        # Construct the prompt for Gemini
        prompt = f"""
Evaluate the following Python code solution for the challenge:

Challenge: {challenge['title']}
Description: {challenge['description']}

Test Cases:
{json.dumps(challenge['test_cases'], indent=2)}

User's Solution:
```python
{user_code}
```

Evaluate if the solution correctly solves the challenge based on the test cases.
Consider:
1. Correctness (does it produce the expected output for all test cases?)
2. Efficiency (is the solution reasonably efficient?)
3. Code quality (is the code well-structured and readable?)

For each test case, indicate whether the solution passes or fails.
Provide a brief explanation of why it passes or fails.
Finally, provide an overall assessment: is the solution correct (pass all test cases)?

Return your response in the following JSON format:
{{
    "test_results": [
        {{"test_case": "input", "expected": "output", "result": "pass/fail", "explanation": "brief explanation"}}
    ],
    "overall_assessment": "pass/fail",
    "feedback": "brief feedback for the user",
    "is_correct": true/false,
    "code_quality_score": 1-10,
    "algorithm_efficiency_score": 1-10
}}

Ensure your response is valid JSON.
"""

        # Generate content with Gemini
        model = genai.GenerativeModel('gemini-1.5-pro')
        response = model.generate_content(prompt)

        # BUG FIX: Gemini frequently wraps JSON answers in ```json ... ```
        # markdown fences; json.loads on the raw text then fails and every
        # valid evaluation was discarded. Strip the fences before parsing.
        text = response.text.strip()
        if text.startswith("```"):
            newline = text.find("\n")
            if newline != -1:
                text = text[newline + 1:]
            if text.endswith("```"):
                text = text[:-3].strip()

        try:
            return json.loads(text)
        except json.JSONDecodeError:
            # Model did not return valid JSON even after fence stripping.
            return _fallback(
                "Unable to evaluate",
                "There was an issue evaluating your code. Please try again."
            )

    except Exception as e:
        # Network/SDK failures (or a missing global in isolation) end here.
        return _fallback(
            f"Error: {str(e)}",
            "There was an error evaluating your code. Please check your syntax and try again."
        )
228
+
229
def adjust_difficulty_with_llm(user_code, evaluation, challenge):
    """Use the LLM to pick the next difficulty from code quality and history.

    Args:
        user_code: The submitted solution source.
        evaluation: Result dict from evaluate_code_with_gemini().
        challenge: The challenge that was attempted.

    Returns:
        dict with recommended_difficulty, explanation, skill_assessment.
        Falls back to the success-rate heuristic when the key is missing or
        the model reply cannot be parsed.

    Side effects: appends to user_data["solution_history"] and may update
    user_data["difficulty_level"].
    """
    # Check if API key is available; use the simple heuristic without it.
    if not GEMINI_API_KEY:
        return fallback_difficulty_adjustment(evaluation.get("is_correct", False))

    # Store the solution in history for future skill analysis.
    user_data["solution_history"].append({
        "challenge_id": challenge["id"],
        "difficulty": user_data["difficulty_level"],
        "code": user_code,
        "is_correct": evaluation.get("is_correct", False),
        "code_quality_score": evaluation.get("code_quality_score", 5),
        "algorithm_efficiency_score": evaluation.get("algorithm_efficiency_score", 5)
    })

    # Compute the success rate once instead of inlining the conditional
    # expression inside the f-string below.
    attempts = user_data["total_attempts"]
    success_rate = user_data["correct_answers"] / attempts if attempts > 0 else 0

    # Format the prompt for Gemini
    prompt = f"""
Analyze the user's solution and programming skill level to recommend an appropriate difficulty level.

Current Difficulty Level: {user_data["difficulty_level"]}
Challenge: {challenge["title"]}
Description: {challenge["description"]}

User's Solution:
```python
{user_code}
```

Evaluation Summary:
- Correctness: {"Correct" if evaluation.get("is_correct", False) else "Incorrect"}
- Code Quality Score: {evaluation.get("code_quality_score", 5)}/10
- Algorithm Efficiency Score: {evaluation.get("algorithm_efficiency_score", 5)}/10

User's History:
- Total Attempts: {user_data["total_attempts"]}
- Correct Solutions: {user_data["correct_answers"]}
- Success Rate: {success_rate:.2%}

Based on this information, recommend the next difficulty level (easy, medium, or hard).
Consider the following factors:
1. Whether the solution is correct
2. The quality and efficiency of the code
3. The user's historical performance
4. The current difficulty level

Provide your recommendation in the following JSON format:
{{
    "recommended_difficulty": "easy/medium/hard",
    "explanation": "brief explanation for the recommendation",
    "skill_assessment": "brief assessment of the user's skill level"
}}

Ensure your response is valid JSON.
"""

    try:
        # Generate content with Gemini
        model = genai.GenerativeModel('gemini-1.5-pro')
        response = model.generate_content(prompt)

        # BUG FIX: strip the ```json ... ``` markdown fences Gemini often
        # wraps around JSON, otherwise json.loads rejects valid replies and
        # the heuristic fallback fires on every submission.
        text = response.text.strip()
        if text.startswith("```"):
            newline = text.find("\n")
            if newline != -1:
                text = text[newline + 1:]
            if text.endswith("```"):
                text = text[:-3].strip()

        try:
            result = json.loads(text)
        except json.JSONDecodeError:
            # Unparseable reply: fall back to the success-rate heuristic.
            return fallback_difficulty_adjustment(evaluation.get("is_correct", False))

        # Only accept a known difficulty level; otherwise keep the current one.
        old_difficulty = user_data["difficulty_level"]
        recommended = result.get("recommended_difficulty", old_difficulty)
        if recommended in ("easy", "medium", "hard"):
            user_data["difficulty_level"] = recommended
        return result

    except Exception as e:
        # Keep the current difficulty on any API/SDK failure.
        return {
            "recommended_difficulty": user_data["difficulty_level"],
            "explanation": f"Error in difficulty adjustment: {str(e)}. Maintaining current difficulty.",
            "skill_assessment": "Unable to assess skill level due to an error."
        }
312
+
313
def fallback_difficulty_adjustment(is_correct):
    """Fallback: adjust difficulty from the overall success rate alone.

    Args:
        is_correct: Whether the latest submission passed (kept for interface
            compatibility; the counters themselves are owned by the caller).

    Returns:
        dict with recommended_difficulty, explanation, skill_assessment.

    BUG FIX: the original also incremented user_data["correct_answers"] and
    ["total_attempts"] here, but handle_submission already increments them
    before the difficulty adjustment runs — every fallback submission was
    counted twice, skewing the success rate. The counters are no longer
    touched here.
    """
    total = user_data["total_attempts"]
    success_rate = user_data["correct_answers"] / total if total > 0 else 0

    # Move one step up on a high success rate, one step down on a low one.
    levels = ["easy", "medium", "hard"]
    old_level = user_data["difficulty_level"]
    index = levels.index(old_level)
    if success_rate > 0.7 and index < len(levels) - 1:
        index += 1
    elif success_rate < 0.3 and index > 0:
        index -= 1
    new_level = levels[index]
    user_data["difficulty_level"] = new_level

    # BUG FIX: the original derived the trend word from `'easy' in old_level`,
    # which mislabeled e.g. a medium->hard move as "decreasing". Compare the
    # ordered level indices instead.
    if new_level == old_level:
        trend = "maintaining"
    elif levels.index(new_level) > levels.index(old_level):
        trend = "increasing"
    else:
        trend = "decreasing"

    return {
        "recommended_difficulty": new_level,
        "explanation": f"Based on your success rate of {success_rate:.2%}, {trend} difficulty.",
        "skill_assessment": "Skill assessment based on success rate only."
    }
340
+
341
def handle_submission(user_code):
    """Evaluate a submitted solution and return a Markdown results report.

    Also updates the attempt counters and triggers the LLM-driven
    difficulty adjustment.
    """
    challenge = user_data["current_challenge"]
    if not challenge:
        return "Please get a challenge first."

    # Ask Gemini to judge the solution.
    evaluation = evaluate_code_with_gemini(user_code, challenge)

    # Update the running score before adjusting difficulty.
    if evaluation.get("is_correct", False):
        user_data["correct_answers"] += 1
    user_data["total_attempts"] += 1

    # Let the LLM (or the heuristic fallback) pick the next difficulty.
    difficulty_adjustment = adjust_difficulty_with_llm(user_code, evaluation, challenge)

    # Assemble the Markdown report from parts and join once.
    parts = [
        f"## Evaluation Results\n\n",
        f"**Challenge:** {challenge['title']}\n\n",
    ]

    per_test = evaluation.get("test_results")
    if per_test:
        parts.append("**Test Results:**\n")
        for entry in per_test:
            parts.append(
                f"- Input: `{entry.get('test_case', 'N/A')}`, "
                f"Expected: `{entry.get('expected', 'N/A')}`, "
                f"Result: **{entry.get('result', 'N/A')}**\n"
            )
            parts.append(f"  {entry.get('explanation', 'N/A')}\n\n")

    parts.append(f"**Overall Assessment:** {evaluation.get('overall_assessment', 'N/A')}\n\n")
    parts.append(f"**Code Quality:** {evaluation.get('code_quality_score', 'N/A')}/10\n")
    parts.append(f"**Algorithm Efficiency:** {evaluation.get('algorithm_efficiency_score', 'N/A')}/10\n\n")
    parts.append(f"**Feedback:** {evaluation.get('feedback', 'N/A')}\n\n")
    parts.append(f"**Difficulty Adjustment:**\n")
    parts.append(f"- New Difficulty: {difficulty_adjustment.get('recommended_difficulty', user_data['difficulty_level'])}\n")
    parts.append(f"- Reason: {difficulty_adjustment.get('explanation', 'N/A')}\n")
    parts.append(f"- Skill Assessment: {difficulty_adjustment.get('skill_assessment', 'N/A')}\n")

    return "".join(parts)
385
+
386
def display_challenge():
    """Fetch a new challenge and render it as Markdown for the UI."""
    picked = get_challenge()
    return "".join([
        f"## {picked['title']}\n\n",
        f"**Difficulty:** {user_data['difficulty_level']}\n\n",
        f"**Description:** {picked['description']}\n\n",
        f"**Example Input:** {picked['example_input']}\n",
        f"**Example Output:** {picked['example_output']}\n\n",
        "Write your solution in Python and submit it when ready.",
    ])
398
+
399
def reset_session():
    """Clear all progress and return the difficulty to "easy"."""
    user_data.update(
        current_challenge=None,
        difficulty_level="easy",
        correct_answers=0,
        total_attempts=0,
        solution_history=[],
    )
    return "Session reset. Your progress has been cleared and difficulty has been reset to easy."
407
+
408
def check_api_key():
    """Return the header Markdown: app title when the key is set, a setup warning otherwise."""
    if GEMINI_API_KEY:
        return gr.Markdown("# LLM-Adaptive Python Coding Challenge\nThis application provides Python coding challenges that adapt to your skill level using AI.")

    # Guard path: no key configured — show setup instructions instead.
    return gr.Markdown("""
    ## ⚠️ API Key Not Found

    The Gemini API key is not configured. Please add it in the Space secrets with the name `GEMINI_API_KEY`.

    ### How to add a secret:
    1. Go to the Settings tab on your Space
    2. Navigate to the "Repository secrets" section
    3. Add a new secret with the name `GEMINI_API_KEY` and your API key as the value
    4. Restart the Space
    """)
424
+
425
# Set up the Gradio interface.
# Layout: left column = challenge text + controls + editor; right column =
# results and usage notes. Handlers are the module-level functions above.
with gr.Blocks(title="LLM-Adaptive Python Coding Challenge", theme=gr.themes.Base()) as app:
    # Placeholder header; replaced by check_api_key() once the app loads.
    header = gr.Markdown("Checking API configuration...")

    with gr.Row():
        with gr.Column(scale=2):
            # Challenge text rendered by display_challenge().
            challenge_display = gr.Markdown("Click 'Get Challenge' to start")

            with gr.Row():
                get_challenge_btn = gr.Button("Get Challenge")
                reset_btn = gr.Button("Reset Progress")

            # Python code editor for the user's solution.
            code_input = gr.Code(language="python", lines=15, label="Your Solution")
            submit_btn = gr.Button("Submit Solution")

        with gr.Column(scale=3):
            # Evaluation report rendered by handle_submission()/reset_session().
            result_display = gr.Markdown("Results will appear here")

            gr.Markdown("### How it works")
            gr.Markdown("1. Get a challenge by clicking 'Get Challenge'")
            gr.Markdown("2. Write your solution in Python")
            gr.Markdown("3. Submit your solution for evaluation")
            gr.Markdown("4. The AI will analyze your code and adjust the difficulty based on your coding style, efficiency, and correctness")

    # Check API key on load (swaps the placeholder header for the real banner).
    app.load(check_api_key, [], [header])

    # Wire the buttons to their handlers.
    get_challenge_btn.click(display_challenge, inputs=[], outputs=challenge_display)
    reset_btn.click(reset_session, inputs=[], outputs=result_display)
    submit_btn.click(handle_submission, inputs=[code_input], outputs=result_display)

# Launch the app only when run as a script (Spaces runs this module directly).
if __name__ == "__main__":
    app.launch()