Ais
committed on
Update app/main.py
app/main.py +73 -551
app/main.py
CHANGED
@@ -5,10 +5,9 @@ from fastapi.responses import JSONResponse
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel
 from starlette.middleware.cors import CORSMiddleware
-import re
 
 # === Setup FastAPI ===
-app = FastAPI(title="Apollo AI Backend - Qwen2-0.5B
 
 # === CORS ===
 app.add_middleware(
@@ -42,527 +41,102 @@ print("🔗 Applying LoRA adapter to Qwen2-0.5B...")
 model = PeftModel.from_pretrained(base_model, ADAPTER_PATH)
 model.eval()
 
-print("✅ Qwen2-0.5B model ready
 
-def
     """
-
     """
-
-    "
-        "user_messages": [],
-        "assistant_messages": [],
-        "topics": [],
-        "current_topic": None,
-        "user_attempted_code": False,
-        "user_stuck": False,
-        "repeated_questions": 0,
-        "question_type": "general",
-        "learning_progression": "beginner"
-    }
-
-    # Get last 6 messages (3 user + 3 assistant)
-    recent_messages = messages[-6:] if len(messages) > 6 else messages
-
-    for msg in recent_messages:
-        context["conversation_history"].append({
-            "role": msg.get("role"),
-            "content": msg.get("content", "")
-        })
-
-        if msg.get("role") == "user":
-            content = msg.get("content", "").lower()
-            context["user_messages"].append(msg.get("content", ""))
-
-            # Detect question types
-            if "what" in content and ("print" in content or "output" in content):
-                context["question_type"] = "basic_concept"
-                context["current_topic"] = "print_function"
-            elif "output" in content and "print" in content:
-                context["question_type"] = "prediction"
-                context["current_topic"] = "print_output"
-            elif "calculator" in content or "create" in content:
-                context["question_type"] = "project_request"
-                context["current_topic"] = "calculator"
-            elif "function" in content:
-                context["question_type"] = "concept_inquiry"
-                context["current_topic"] = "functions"
-            elif "variable" in content:
-                context["question_type"] = "concept_inquiry"
-                context["current_topic"] = "variables"
-            elif "error" in content or "not working" in content or "tried" in content:
-                context["user_attempted_code"] = True
-                context["question_type"] = "debugging"
-
-            # Check for repeated similar questions
-            if len(context["user_messages"]) >= 2:
-                recent_questions = context["user_messages"][-2:]
-                similarity_keywords = ["what", "how", "print", "output", "function"]
-                common_words = 0
-                for keyword in similarity_keywords:
-                    if keyword in recent_questions[0].lower() and keyword in recent_questions[1].lower():
-                        common_words += 1
-                if common_words >= 2:
-                    context["repeated_questions"] += 1
-
-        elif msg.get("role") == "assistant":
-            context["assistant_messages"].append(msg.get("content", ""))
-
-    # Determine learning progression
-    if len(context["user_messages"]) > 2:
-        context["learning_progression"] = "intermediate"
-    if context["user_attempted_code"]:
-        context["learning_progression"] = "hands_on"
-
-    return context
-
-def generate_mentor_response(user_message: str, context: dict) -> str:
-    """
-    Generate context-aware mentor responses that guide learning through questions.
-    """
-    user_lower = user_message.lower()
-    question_type = context.get("question_type", "general")
-    current_topic = context.get("current_topic", None)
-    user_attempted = context.get("user_attempted_code", False)
-    conversation_length = len(context.get("user_messages", []))
-
-    print(f"🎓 Mentor mode - Question type: {question_type}, Topic: {current_topic}, Attempted: {user_attempted}")
-
-    # Handle basic concept questions about print()
-    if "what" in user_lower and "print" in user_lower:
-        if "use" in user_lower or "does" in user_lower:
-            return """What do you think the word "print" suggests? 🤔
-
-In everyday life, when we print something, we make it visible on paper. What do you think `print()` might do in Python?
-
-**Think about:**
-- Where would Python show information to you?
-- If you wanted to see the result of your code, how would Python display it?
-
-Try to guess what happens when you run `print("hello")`!"""
-
-        return """Good question! Let's think step by step:
-
-**What does "print" mean in real life?**
-When you print a document, you make it visible, right?
-
-**In Python, where do you think the output would appear?**
-- On your screen?
-- In a file?
-- Somewhere else?
-
-What do you think `print()` is designed to do? Take a guess! 🤔"""
-
-    # Handle output prediction questions
-    if ("output" in user_lower or "result" in user_lower) and "print" in user_lower:
-        if current_topic == "print_function" or "print" in user_lower:
-            return """Great follow-up question! You're thinking like a programmer! 🎯
-
-**Before I tell you, let's think:**
-1. What's inside those quotation marks?
-2. When Python sees `print("something")`, what do you think it does with that "something"?
-
-**Try to predict:**
-- Will it show exactly what's in the quotes?
-- Will it change it somehow?
-- Where will you see the result?
-
-What's your prediction? Then try running it and see if you're right! 🔍"""
-
-    # Handle calculator project requests
-    if "calculator" in user_lower and ("create" in user_lower or "make" in user_lower):
-        if conversation_length == 1:  # First time asking
-            return """Excellent project choice! Let's break this down step by step 🧮
-
-**Think about using a calculator in real life:**
-1. What's the first thing you need to input?
-2. What operation do you want to perform?
-3. What's the second number?
-4. What should happen next?
-
-**Start simple:** How would you get just ONE number from the user in Python? What function do you think gets user input? 🤔
-
-Once you figure that out, we'll build on it!"""
-        else:  # Follow-up on calculator
-            return """Great! You're building on what you know! 🔨
-
-**Next step thinking:**
-- You can get user input ✓
-- Now how do you perform math operations?
-- What if the user wants addition? Subtraction?
-
-**Challenge:** Can you think of a way to let the user CHOOSE which operation they want?
-
-Hint: How does your code make decisions? What happens "IF" the user picks "+"? 🤔"""
-
-    # Handle debugging/error situations
-    if user_attempted and ("error" in user_lower or "not working" in user_lower or "tried" in user_lower):
-        return """I love that you're experimenting! That's how you learn! 🔧
-
-**Debugging steps:**
-1. What exactly did you type?
-2. What happened when you ran it?
-3. What did you expect to happen?
-4. Are there any red error messages?
-
-**Common issues to check:**
-- Did you use parentheses `()` correctly?
-- Are your quotation marks matched?
-- Did you spell everything correctly?
-
-Share what you tried and what error you got - let's debug it together! 🐛"""
-
-    # Handle function-related questions
-    if "function" in user_lower:
-        if current_topic == "print_function":
-            return """Perfect! You're asking the right questions! 🎯
-
-**Let's think about functions:**
-- What's a function in math? (like f(x) = x + 2)
-- It takes input and gives output, right?
-
-**In Python:**
-- `print()` is a function
-- What goes inside the parentheses `()` is the input
-- What do you think the output is?
-
-**Try this thinking exercise:**
-If `print()` is like a machine, what does it do with whatever you put inside? 🤖"""
-
-    # Handle variable questions
-    if "variable" in user_lower:
-        return """Variables are like labeled boxes! 📦
-
-**Think about it:**
-- How do you remember someone's name?
-- How do you store something for later?
-
-**In Python:**
-- How would you tell Python to "remember" a number?
-- What symbol might connect a name to a value?
-
-Try to guess: `age __ 25` - what goes in the blank? 🤔"""
-
-    # Handle repeated questions (user might be stuck)
-    if context.get("repeated_questions", 0) > 0:
-        return """I notice you're asking similar questions - that's totally fine! Learning takes time! 📚
-
-**Let's try a different approach:**
-1. What specific part is confusing you?
-2. Have you tried running any code yet?
-3. What happened when you tried?
-
-**Suggestion:** Start with something super simple:
-- Open Python
-- Type one line of code
-- See what happens
-
-What's the smallest thing you could try right now? 🚀"""
-
-    # Generic mentor response with context awareness
-    if conversation_length > 0:
-        return """I can see you're building on our conversation! That's great! 🎯
-
-**Let's break down your question:**
-- What specifically do you want to understand?
-- Are you trying to predict what will happen?
-- Or are you looking to build something?
-
-**Think step by step:**
-What's the smallest piece of this problem you could solve first? 🧩"""
-
-    # Default mentor response
-    return """Interesting question! Let's think through this together! 🤔
-
-**Questions to consider:**
-- What are you trying to accomplish?
-- What do you already know about this topic?
-- What's the first small step you could take?
-
-Break it down into smaller pieces - what would you try first? 🚀"""
-
-def generate_force_response(user_message: str, context: dict) -> str:
-    """
-    Generate direct, complete answers for force mode.
-    """
-    user_lower = user_message.lower()
-    current_topic = context.get("current_topic", None)
-
-    print(f"⚡ Force mode - Topic: {current_topic}")
-
-    # Direct answer for print() function questions
-    if "what" in user_lower and "print" in user_lower:
-        if "use" in user_lower or "does" in user_lower or "function" in user_lower:
-            return """`print()` is a built-in Python function that displays output to the console/screen.
-
-**Purpose:** Shows text, numbers, or variables to the user.
-
-**Syntax:** `print(value)`
-
-**Examples:**
-```python
-print("Hello World") # Outputs: Hello World
-print(42) # Outputs: 42
-print(3 + 5) # Outputs: 8
-```
-
-**What it does:** Takes whatever you put inside the parentheses and displays it on the screen."""
-
-    # Direct answer for output prediction
-    if ("output" in user_lower or "result" in user_lower) and "print" in user_lower:
-        # Check if they're asking about a specific print statement
-        if '"ais"' in user_message or "'ais'" in user_message:
-            return """The output of `print("ais")` will be exactly:
-
-```
-ais
-```
-
-**Explanation:** The `print()` function displays whatever text is inside the quotation marks, without the quotes themselves. So `"ais"` becomes just `ais` on the screen."""
-
-        elif "hello" in user_lower:
-            return """The output of `print("Hello World")` will be:
-
-```
-Hello World
-```
-
-The text inside the quotes appears on the screen without the quotation marks."""
-
-        return """The output depends on what's inside the `print()` function:
-
-**Examples:**
-- `print("text")` → displays: `text`
-- `print(123)` → displays: `123`
-- `print(2 + 3)` → displays: `5`
-
-The `print()` function shows the value without quotes (for strings) or evaluates expressions first."""
-
-    # Direct answer for calculator project
-    if "calculator" in user_lower and ("create" in user_lower or "make" in user_lower):
-        return """Here's a complete working calculator:
-
-```python
-# Simple Calculator
-print("=== Simple Calculator ===")
-
-# Get input from user
-num1 = float(input("Enter first number: "))
-operator = input("Enter operator (+, -, *, /): ")
-num2 = float(input("Enter second number: "))
-
-# Perform calculation
-if operator == '+':
-    result = num1 + num2
-elif operator == '-':
-    result = num1 - num2
-elif operator == '*':
-    result = num1 * num2
-elif operator == '/':
-    if num2 != 0:
-        result = num1 / num2
     else:
-
-else:
-    result = "Error: Invalid operator"
-
-# Display result
-print(f"Result: {result}")
-```
-
-**How it works:**
-1. Gets two numbers from user using `input()` and converts to `float()`
-2. Gets the operator (+, -, *, /)
-3. Uses `if/elif` statements to perform the correct operation
-4. Displays the result using `print()`"""
 
-    #
-
-        return """Functions in Python are reusable blocks of code that perform specific tasks.
-
-**Defining a function:**
-```python
-def function_name(parameters):
-    # code here
-    return result
-```
-
-**Example:**
-```python
-def greet(name):
-    return f"Hello, {name}!"
-
-def add_numbers(a, b):
-    return a + b
-
-# Calling functions
-message = greet("Alice") # Returns "Hello, Alice!"
-sum_result = add_numbers(5, 3) # Returns 8
-```
-
-**Key points:**
-- Use `def` keyword to define functions
-- Functions can take parameters (inputs)
-- Use `return` to send back a result
-- Call functions by using their name with parentheses"""
-
-    # Direct answer for variables
-    if "variable" in user_lower:
-        return """Variables in Python store data values using the assignment operator `=`.
-
-**Syntax:** `variable_name = value`
-
-**Examples:**
-```python
-name = "John" # String variable
-age = 25 # Integer variable
-height = 5.8 # Float variable
-is_student = True # Boolean variable
-```
-
-**Rules:**
-- Variable names can contain letters, numbers, and underscores
-- Must start with a letter or underscore
-- Case-sensitive (`age` and `Age` are different)
-- Use descriptive names (`user_age` not `x`)
-
-**Using variables:**
-```python
-print(name) # Outputs: John
-print(age + 5) # Outputs: 30
-```"""
-
-    # Direct answer for input function
-    if "input" in user_lower and ("function" in user_lower or "how" in user_lower):
-        return """`input()` function gets text from the user.
-
-**Syntax:** `variable = input("prompt message")`
-
-**Examples:**
-```python
-name = input("Enter your name: ")
-age = input("Enter your age: ")
-print(f"Hello {name}, you are {age} years old")
-```
-
-**Important:** `input()` always returns a string. For numbers, convert:
-```python
-age = int(input("Enter age: ")) # For whole numbers
-price = float(input("Enter price: ")) # For decimals
-```
-
-**Common pattern:**
-```python
-user_input = input("Your choice: ")
-print(f"You entered: {user_input}")
-```"""
 
-    #
-
-
-**Try asking:**
-- "What does print() do in Python?"
-- "How do I create variables?"
-- "Show me how to make a calculator"
-- "What is the output of print('hello')?"
-
-Please rephrase your question more specifically."""
-
-def extract_clean_answer(full_response: str, formatted_prompt: str, user_message: str, context: dict, is_force_mode: bool) -> str:
-    """
-    FIXED: Clean response extraction with proper mode handling and context awareness.
-    """
-    if not full_response or len(full_response.strip()) < 5:
-        # Fallback to context-aware responses
-        if is_force_mode:
-            return generate_force_response(user_message, context)
-        else:
-            return generate_mentor_response(user_message, context)
 
-
-
-
 
-
-
-        predefined_response = generate_force_response(user_message, context)
-        print("✅ Using context-aware FORCE response")
-        return predefined_response
-    else:
-        predefined_response = generate_mentor_response(user_message, context)
-        print("✅ Using context-aware MENTOR response")
-        return predefined_response
 
 def generate_response(messages: list, is_force_mode: bool = False, max_tokens: int = 200, temperature: float = 0.7) -> str:
     """
-
     """
     try:
-        #
-
-
 
-        #
-
-        for msg in reversed(messages):
-            if msg.get("role") == "user":
-                current_user_message = msg.get("content", "")
-                break
 
-
-
 
-
-
-        print(f"🔍 Question type: {context.get('question_type', 'unknown')}")
-        print(f"📖 Current topic: {context.get('current_topic', 'general')}")
 
-        #
-        if
-            response =
-            print("✅ Generated FORCE mode response")
-        else:
-            response = generate_mentor_response(current_user_message, context)
-            print("✅ Generated MENTOR mode response")
 
-
-        if not is_force_mode:
-            # Mentor mode should ask questions or provide guidance
-            has_questions = '?' in response or any(word in response.lower() for word in ['think', 'consider', 'try', 'what', 'how', 'why'])
-            if not has_questions:
-                print("⚠️ Mentor response lacks questions, enhancing...")
-                response += "\n\nWhat do you think? Give it a try! 🤔"
-        else:
-            # Force mode should provide direct answers
-            if len(response) < 30 and 'specific' in response:
-                print("⚠️ Force response too vague, enhancing...")
-                response = generate_force_response(current_user_message, context)
 
-
-
 
         return response
 
     except Exception as e:
         print(f"❌ Generation error: {e}")
-        # Context-aware error fallback
        if is_force_mode:
-            return "I encountered an error
        else:
-            return "I had trouble processing that.
 
 # === Routes ===
 @app.get("/")
 def root():
     return {
-        "message": "🤖 Apollo AI Backend
         "model": "Qwen/Qwen2-0.5B-Instruct with LoRA",
         "status": "ready",
-        "optimizations": ["context_aware", "conversation_history", "progressive_guidance", "guaranteed_mode_compliance"],
-        "features": ["mentor_mode", "force_mode", "context_analysis", "topic_tracking"],
         "modes": {
-            "mentor": "Guides learning with
-            "force": "Provides direct answers
         }
     }
 
@@ -571,8 +145,7 @@ def health():
     return {
         "status": "healthy",
         "model_loaded": True,
-        "model_size": "0.5B"
-        "optimizations": "context_aware_with_guaranteed_mode_compliance"
     }
 
 @app.post("/v1/chat/completions")
@@ -597,7 +170,7 @@ async def chat_completions(request: Request):
     body = await request.json()
     messages = body.get("messages", [])
    max_tokens = min(body.get("max_tokens", 200), 400)
-    temperature = max(0.1, min(body.get("temperature", 0.
 
    is_force_mode = body.get("force_mode", False)
 
@@ -619,8 +192,8 @@ async def chat_completions(request: Request):
        )
 
    try:
-        print(f"📥 Processing
-        print(f"📊 Total
 
        response_content = generate_response(
            messages=messages,
@@ -630,10 +203,10 @@ async def chat_completions(request: Request):
        )
 
        return {
-            "id": f"chatcmpl-apollo-
            "object": "chat.completion",
            "created": int(torch.tensor(0).item()),
-            "model": f"qwen2-0.5b-{'force' if is_force_mode else 'mentor'}
            "choices": [
                {
                    "index": 0,
@@ -649,8 +222,7 @@ async def chat_completions(request: Request):
                "completion_tokens": len(response_content),
                "total_tokens": len(str(messages)) + len(response_content)
            },
-            "apollo_mode": "force" if is_force_mode else "mentor"
-            "model_optimizations": "context_aware_conversation_with_guaranteed_compliance"
        }
 
    except Exception as e:
@@ -660,60 +232,10 @@ async def chat_completions(request: Request):
            content={"error": f"Internal server error: {str(e)}"}
        )
 
-@app.post("/test")
-async def test_generation(request: Request):
-    """Enhanced test endpoint with conversation context and mode validation"""
-    try:
-        body = await request.json()
-        prompt = body.get("prompt", "What does print() do in Python?")
-        max_tokens = min(body.get("max_tokens", 200), 400)
-        test_both_modes = body.get("test_both_modes", True)
-
-        # Simulate conversation context
-        messages = [{"role": "user", "content": prompt}]
-
-        results = {}
-
-        # Test mentor mode
-        mentor_response = generate_response(messages, is_force_mode=False, max_tokens=max_tokens, temperature=0.4)
-        results["mentor_mode"] = {
-            "response": mentor_response,
-            "length": len(mentor_response),
-            "mode": "mentor",
-            "asks_questions": "?" in mentor_response,
-            "has_guidance_words": any(word in mentor_response.lower() for word in ['think', 'try', 'consider', 'what', 'how'])
-        }
-
-        if test_both_modes:
-            # Test force mode
-            force_response = generate_response(messages, is_force_mode=True, max_tokens=max_tokens, temperature=0.2)
-            results["force_mode"] = {
-                "response": force_response,
-                "length": len(force_response),
-                "mode": "force",
-                "provides_code": "```" in force_response or "`" in force_response,
-                "is_direct": len(force_response) > 50 and not ("think" in force_response.lower() and "?" in force_response)
-            }
-
-        return {
-            "prompt": prompt,
-            "results": results,
-            "model": "Qwen2-0.5B-Instruct-Fixed",
-            "optimizations": "context_aware_conversation_with_guaranteed_mode_compliance",
-            "status": "success"
-        }
-
-    except Exception as e:
-        return JSONResponse(
-            status_code=500,
-            content={"error": str(e)}
-        )
-
 if __name__ == "__main__":
    import uvicorn
-    print("🚀 Starting
    print("🧠 Model: Qwen/Qwen2-0.5B-Instruct (500M parameters)")
-    print("
-    print("
-    print("🔧 Fixed: Proper mode detection, conversation context, topic tracking")
    uvicorn.run(app, host="0.0.0.0", port=7860)
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel
 from starlette.middleware.cors import CORSMiddleware
 
 # === Setup FastAPI ===
+app = FastAPI(title="Apollo AI Backend - Qwen2-0.5B", version="3.0.0")
 
 # === CORS ===
 app.add_middleware(
 model = PeftModel.from_pretrained(base_model, ADAPTER_PATH)
 model.eval()
 
+print("✅ Qwen2-0.5B model ready!")
 
+def create_conversation_prompt(messages: list, is_force_mode: bool) -> str:
     """
+    Create a simple conversation prompt with appropriate system instruction
     """
+    if is_force_mode:
+        system_prompt = "You are a helpful coding assistant. Give direct, clear answers with code examples when needed. Be concise and practical."
     else:
+        system_prompt = "You are a teacher helping a student learn programming. Don't give direct answers. Instead, ask guiding questions to help them think and discover the solution themselves. Guide them step by step with questions like 'What do you think...?' or 'How would you...?'"
 
+    # Build conversation
+    conversation = f"System: {system_prompt}\n\n"
 
+    # Add last 6 messages (3 pairs) for context
+    recent_messages = messages[-6:] if len(messages) > 6 else messages
 
+    for msg in recent_messages:
+        role = msg.get("role", "")
+        content = msg.get("content", "")
+        if role == "user":
+            conversation += f"Student: {content}\n"
+        elif role == "assistant":
+            conversation += f"Assistant: {content}\n"
 
+    conversation += "Assistant:"
+    return conversation
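For a quick sanity check, the hypothetical snippet below (not part of the commit; it assumes the helper is in scope, e.g. imported from app.main) shows the prompt string this function builds for a short mentor-mode history. The expected output follows directly from the string-building logic above:

```python
# Hypothetical usage of the create_conversation_prompt helper added above.
messages = [
    {"role": "user", "content": "What does print() do?"},
    {"role": "assistant", "content": "What do you think it might do?"},
    {"role": "user", "content": "Does it show something on the screen?"},
]

# With fewer than 6 messages, the whole history is included.
print(create_conversation_prompt(messages, is_force_mode=False))
# System: You are a teacher helping a student learn programming. ...
#
# Student: What does print() do?
# Assistant: What do you think it might do?
# Student: Does it show something on the screen?
# Assistant:
```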
 
 def generate_response(messages: list, is_force_mode: bool = False, max_tokens: int = 200, temperature: float = 0.7) -> str:
     """
+    Generate response using the actual AI model
     """
     try:
+        # Create conversation prompt
+        prompt = create_conversation_prompt(messages, is_force_mode)
+
+        print(f"🎯 Generating {'FORCE' if is_force_mode else 'MENTOR'} response")
+        print(f"📝 Prompt length: {len(prompt)}")
+
+        # Tokenize input
+        inputs = tokenizer(prompt, return_tensors="pt", max_length=1024, truncation=True)
+
+        # Generate response
+        with torch.no_grad():
+            outputs = model.generate(
+                inputs.input_ids,
+                max_new_tokens=max_tokens,
+                temperature=temperature,
+                do_sample=True,
+                pad_token_id=tokenizer.eos_token_id,
+                eos_token_id=tokenizer.eos_token_id,
+                top_p=0.9,
+                repetition_penalty=1.1
+            )
 
+        # Decode response
+        full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
+        # Extract only the new generated part
+        response = full_response[len(prompt):].strip()
 
+        # Clean up response
+        response = response.replace("Student:", "").replace("Assistant:", "").strip()
 
+        # Remove any system mentions
+        if response.startswith("System:"):
+            response = response.split("\n", 1)[-1].strip()
 
+        print(f"✅ Generated response length: {len(response)}")
 
+        if not response or len(response) < 10:
+            # Fallback responses
+            if is_force_mode:
+                return "I need more specific information to provide a direct answer. Please clarify your question."
+            else:
+                return "That's an interesting question! What do you think the answer might be? Try to break it down step by step."
 
         return response
 
     except Exception as e:
         print(f"❌ Generation error: {e}")
         if is_force_mode:
+            return "I encountered an error. Please try rephrasing your question."
         else:
+            return "I had trouble processing that. Can you tell me what you're trying to understand?"
 
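A minimal smoke test of the rewritten generate_response could look like the sketch below. It assumes the tokenizer, base model, and LoRA adapter are already loaded as earlier in app/main.py, and note that the slicing step above (full_response[len(prompt):]) relies on tokenizer.decode() reproducing the prompt text verbatim:

```python
# Hypothetical smoke test; assumes model and tokenizer are loaded as in app/main.py.
demo = [{"role": "user", "content": "How do I create a variable in Python?"}]

mentor_reply = generate_response(demo, is_force_mode=False, max_tokens=120, temperature=0.7)
force_reply = generate_response(demo, is_force_mode=True, max_tokens=120, temperature=0.3)

print("MENTOR:", mentor_reply)  # expected: guiding questions, no direct answer
print("FORCE:", force_reply)    # expected: a direct answer, possibly with code
```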
 # === Routes ===
 @app.get("/")
 def root():
     return {
+        "message": "🤖 Apollo AI Backend v3.0 - Qwen2-0.5B",
         "model": "Qwen/Qwen2-0.5B-Instruct with LoRA",
         "status": "ready",
         "modes": {
+            "mentor": "Guides learning with questions",
+            "force": "Provides direct answers"
         }
     }
 
     return {
         "status": "healthy",
         "model_loaded": True,
+        "model_size": "0.5B"
     }
 
 @app.post("/v1/chat/completions")
     body = await request.json()
     messages = body.get("messages", [])
     max_tokens = min(body.get("max_tokens", 200), 400)
+    temperature = max(0.1, min(body.get("temperature", 0.7), 1.0))
 
     is_force_mode = body.get("force_mode", False)
 
         )
 
     try:
+        print(f"📥 Processing request in {'FORCE' if is_force_mode else 'MENTOR'} mode")
+        print(f"📊 Total messages: {len(messages)}")
 
         response_content = generate_response(
             messages=messages,
         )
 
         return {
+            "id": f"chatcmpl-apollo-{hash(str(messages)) % 10000}",
             "object": "chat.completion",
             "created": int(torch.tensor(0).item()),
+            "model": f"qwen2-0.5b-{'force' if is_force_mode else 'mentor'}",
             "choices": [
                 {
                     "index": 0,
                 "completion_tokens": len(response_content),
                 "total_tokens": len(str(messages)) + len(response_content)
             },
+            "apollo_mode": "force" if is_force_mode else "mentor"
         }
 
     except Exception as e:
             content={"error": f"Internal server error: {str(e)}"}
         )
 
 if __name__ == "__main__":
     import uvicorn
+    print("🚀 Starting Apollo AI Backend v3.0 - Simple & Clean...")
     print("🧠 Model: Qwen/Qwen2-0.5B-Instruct (500M parameters)")
+    print("🎯 Mentor Mode: Asks guiding questions")
+    print("⚡ Force Mode: Gives direct answers")
     uvicorn.run(app, host="0.0.0.0", port=7860)
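With the server started (uvicorn on port 7860, as above), the endpoint can be exercised with a client call along these lines. This is a sketch: the URL is local, and the choices[0].message.content path assumes the elided part of the response payload follows the usual OpenAI chat-completion shape; only apollo_mode is confirmed by the diff:

```python
# Hypothetical client call; assumes the server is running locally on port 7860.
import requests

payload = {
    "messages": [{"role": "user", "content": "What does print() do in Python?"}],
    "max_tokens": 200,
    "temperature": 0.7,
    "force_mode": True,  # False selects mentor mode
}

resp = requests.post("http://localhost:7860/v1/chat/completions", json=payload)
data = resp.json()
print(data["apollo_mode"])                       # "force" or "mentor" (from the diff)
print(data["choices"][0]["message"]["content"])  # assumed OpenAI-style message shape
```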