Debugging shit
app.py CHANGED
@@ -10,6 +10,7 @@ from typing import Dict, Any
 import autopep8
 import textwrap
 import json
+import asyncio
 
 from datasets import load_dataset
 from fastapi.responses import StreamingResponse
@@ -116,7 +117,7 @@ Here's the complete Python function implementation:
     formatted_code = extract_and_format_code(generated_text)
     return {"solution": formatted_code}
 
-def stream_solution(instruction: str, token: str):
+async def stream_solution(instruction: str, token: str):
     if not verify_token(token):
         return {"error": "Invalid token"}
 
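Note on this hunk: making `stream_solution` an `async def` is what lets it build an async generator and hand it to `StreamingResponse` below. One detail worth flagging: `return {"error": "Invalid token"}` still answers with HTTP 200. A minimal sketch of the usual FastAPI pattern, assuming the function is mounted as a route (the `/solution` path and the `verify_token` stub are illustrative, not the app's real wiring):

```python
from fastapi import FastAPI, HTTPException

app = FastAPI()

def verify_token(token: str) -> bool:
    # Stand-in for app.py's real helper.
    return bool(token)

@app.get("/solution")
async def stream_solution(instruction: str, token: str):
    # HTTPException yields a real 401 instead of a 200 response whose
    # body happens to contain an "error" key.
    if not verify_token(token):
        raise HTTPException(status_code=401, detail="Invalid token")
    ...  # build the prompt and stream the generation (next hunk)
```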
@@ -133,29 +134,35 @@ Here's the complete Python function implementation:
 
     ```python
     """
-
+    generated_text = ""
     async def generate():
-
+
         try:
-
-
-
-
-
-
-
-
-
+            # Set a timeout of 2 minutes
+            timeout = asyncio.TimeoutError("LLM generation timed out")
+            async with asyncio.TimeoutManager(timeout):
+                for chunk in llm(full_prompt, stream=True, **generation_kwargs):
+                    token = chunk["choices"][0]["text"]
+                    generated_text += token
+                    logger.info(f"Generated text: {generated_text}")
+                    yield token
+
+                # Optionally send progress updates:
+                progress = len(generated_text) / len(full_prompt)
+                yield {"progress": progress}  # Send a progress update
 
-
-            logger.
-            yield
+        except asyncio.TimeoutError:
+            logger.error("LLM generation timed out")
+            yield {"error": "LLM generation timed out"}
         except Exception as e:
             logger.error(f"Error generating solution: {e}")
             yield {"error": "Error generating solution"}
 
+        formatted_code = extract_and_format_code(generated_text)
+        logger.info(f"Formatted code: {formatted_code}")
+        yield formatted_code
 
-    generated_response = generate()
+    generated_response = await generate()
     logger.info(f"Streaming response {generated_response}")
     return StreamingResponse(generate(), media_type="text/plain")
 
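Note on this hunk: `asyncio` has no `TimeoutManager`, and `asyncio.TimeoutError(...)` on the line above it merely constructs an exception object, so nothing here actually enforces the two-minute budget; the synchronous `for chunk in llm(...)` loop would also block the event loop while it runs. A sketch of one way to get the intended behaviour, assuming the llama-cpp-python-style streaming call and chunk shape used in the diff (`generate_tokens` and `timeout_s` are illustrative names, not the app's real ones):

```python
import asyncio
import logging
from typing import Any, AsyncIterator, Callable, Dict

logger = logging.getLogger(__name__)

async def generate_tokens(
    llm: Callable[..., Any],
    full_prompt: str,
    generation_kwargs: Dict[str, Any],
    timeout_s: float = 120.0,  # the hunk's "2 minutes"
) -> AsyncIterator[str]:
    """Yield text chunks, bounding how long each chunk may take."""
    stream = iter(llm(full_prompt, stream=True, **generation_kwargs))
    done = object()  # sentinel: next() returns it instead of raising
    try:
        while True:
            # Pull each chunk on a worker thread so the event loop stays
            # responsive and wait_for can actually enforce a deadline.
            chunk = await asyncio.wait_for(
                asyncio.to_thread(next, stream, done), timeout=timeout_s
            )
            if chunk is done:
                break
            yield chunk["choices"][0]["text"]  # chunk shape from the diff
    except asyncio.TimeoutError:
        logger.error("LLM generation timed out")
        yield "\n[error: LLM generation timed out]\n"
```

One caveat: when `wait_for` times out it cancels the awaiting coroutine, but the worker thread still finishes its current `next()` call in the background. Yielding only strings also keeps every chunk compatible with a `text/plain` stream.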
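Note on the final lines: an async generator object is not awaitable, so `generated_response = await generate()` raises `TypeError`, and calling `generate()` a second time inside `StreamingResponse` starts a separate, independent stream. Starlette also expects `str`/`bytes` chunks on a `text/plain` response, so dict updates such as `{"progress": ...}` need serializing first (app.py already imports `json`). A sketch under those assumptions; `as_text` and `make_response` are illustrative helpers:

```python
import json

from fastapi.responses import StreamingResponse

async def as_text(gen):
    # Serialize any dict updates (progress, errors) so every chunk handed
    # to Starlette is plain text.
    async for chunk in gen:
        yield chunk if isinstance(chunk, str) else json.dumps(chunk)

def make_response(generate):
    stream = generate()  # create ONE async generator; do not await it
    return StreamingResponse(as_text(stream), media_type="text/plain")
```

As a side note, `len(generated_text) / len(full_prompt)` compares output length to prompt length, so the reported progress is not bounded by 1.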