Debugging shit
app.py CHANGED
@@ -10,7 +10,6 @@ from typing import Dict, Any
 import autopep8
 import textwrap
 import json
-import asyncio
 
 from datasets import load_dataset
 from fastapi.responses import StreamingResponse
@@ -138,10 +137,7 @@ Here's the complete Python function implementation:
     def generate():
 
         try:
-
-            timeout = asyncio.TimeoutError("LLM generation timed out")
-            with asyncio.TimeoutManager(timeout):
-                for chunk in llm(full_prompt, stream=True, **generation_kwargs):
+            for chunk in llm(full_prompt, stream=True, **generation_kwargs):
                 token = chunk["choices"][0]["text"]
                 generated_text += token
                 logger.info(f"Generated text: {generated_text}")
@@ -150,10 +146,6 @@ Here's the complete Python function implementation:
                 # Optionally send progress updates:
                 progress = len(generated_text) / len(full_prompt)
                 yield {"progress": progress}  # Send a progress update
-
-        except asyncio.TimeoutError:
-            logger.error("LLM generation timed out")
-            yield {"error": "LLM generation timed out"}
         except Exception as e:
             logger.error(f"Error generating solution: {e}")
             yield {"error": "Error generating solution"}
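
The dropped block could never have run: the standard library's asyncio has no TimeoutManager, and an asyncio timeout could not preempt the synchronous streaming generator that llm(full_prompt, stream=True, ...) returns (the chunk["choices"][0]["text"] access looks like llama-cpp-python's streaming API), which is presumably why this commit removes the code instead of fixing it. If the timeout behaviour is still wanted, a minimal sketch is a wall-clock deadline checked between chunks; llm, full_prompt, generation_kwargs, and logger are the names from app.py above, while TIMEOUT_SECONDS is a hypothetical constant:

import time

TIMEOUT_SECONDS = 120  # hypothetical cap; tune per deployment

def generate():
    generated_text = ""
    deadline = time.monotonic() + TIMEOUT_SECONDS
    try:
        # The llm call yields chunks synchronously, so the deadline is
        # checked between chunks rather than via asyncio.
        for chunk in llm(full_prompt, stream=True, **generation_kwargs):
            if time.monotonic() > deadline:
                logger.error("LLM generation timed out")
                yield {"error": "LLM generation timed out"}
                return
            token = chunk["choices"][0]["text"]
            generated_text += token
            yield {"token": token}  # sketch: stream each token to the client
    except Exception as e:
        logger.error(f"Error generating solution: {e}")
        yield {"error": "Error generating solution"}

Note that this only bounds the gap between chunks: a single chunk that never arrives still blocks. A hard cap would need the generation moved onto a worker thread (for example via concurrent.futures) whose result is awaited with a timeout.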
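
A side note on the surviving progress update: len(generated_text) / len(full_prompt) compares output characters to prompt characters, so it is not bounded by 1.0 and does not actually track completion. If generation_kwargs carries a max_tokens cap (an assumption about app.py), a bounded estimate counts chunks against that cap, since llama-cpp-python streaming yields roughly one chunk per generated token:

max_tokens = generation_kwargs.get("max_tokens", 512)  # assumed cap
tokens_seen = 0
for chunk in llm(full_prompt, stream=True, **generation_kwargs):
    tokens_seen += 1
    progress = min(tokens_seen / max_tokens, 1.0)  # stays in [0, 1]
    yield {"progress": progress}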