Debugging shit
app.py CHANGED

@@ -13,6 +13,7 @@ import textwrap
 from datasets import load_dataset
 from fastapi.responses import StreamingResponse
 import random
+import asyncio
 
 # Set up logging
 logging.basicConfig(level=logging.INFO)
@@ -115,10 +116,10 @@ Here's the complete Python function implementation:
     formatted_code = extract_and_format_code(generated_text)
     return {"solution": formatted_code}
 
-def stream_solution(instruction: str, token: str):
+async def stream_solution(instruction: str, token: str):
     if not verify_token(token):
         raise Exception("Invalid token")
-
+
     system_prompt = "You are a Python coding assistant specialized in solving LeetCode problems. Provide only the complete implementation of the given function. Ensure proper indentation and formatting. Do not include any explanations or multiple solutions."
     full_prompt = f"""### Instruction:
 {system_prompt}
@@ -132,23 +133,19 @@ Here's the complete Python function implementation:
 
 ```python
 """
-
-
-
+    async def generate():
+        generated_text = ""
         try:
             for chunk in llm(full_prompt, stream=True, **generation_kwargs):
-
-
-
-
+                token = chunk["choices"][0]["text"]
+                generated_text += token
+                logger.info(f"Generated text: {generated_text}")
+                yield token  # Yield individual tokens for streaming
         except Exception as e:
             logger.error(f"Error generating solution: {e}")
             yield {"error": "Error generating solution"}
 
-
-    logger.info(f"Formatted code: {formatted_code}")
-    logger.info(f"Formatted code length: {len(formatted_code)}")
-    yield {"response": formatted_code}
+    return generate()  # Return the async generator
 
 def random_problem(token: str) -> Dict[str, Any]:
     if not verify_token(token):
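For comparison: Gradio can stream text without a nested async generator, because an interface function that is itself a generator updates its output component on every yield (with queueing enabled). A minimal sketch, assuming the same llm callable, generation_kwargs, and verify_token helper defined elsewhere in app.py; build_prompt is a hypothetical stand-in for the prompt assembly above:

    def stream_solution(instruction: str, token: str):
        if not verify_token(token):
            raise Exception("Invalid token")
        full_prompt = build_prompt(instruction)  # hypothetical helper; prompt assembly as above
        partial = ""
        for chunk in llm(full_prompt, stream=True, **generation_kwargs):
            partial += chunk["choices"][0]["text"]  # llama-cpp-python puts each new fragment here
            yield partial  # each yield re-renders the output textbox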
@@ -174,7 +171,7 @@ generate_interface = gr.Interface(
 stream_interface = gr.Interface(
     fn=stream_solution,
     inputs=[gr.Textbox(label="Problem Instruction"), gr.Textbox(label="JWT Token")],
-    outputs=gr.
+    outputs=gr.Text(),  # Use gr.Text for streaming text
     title="Stream Solution API",
     description="Provide a LeetCode problem instruction and a valid JWT token to stream a solution."
 )
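In current Gradio releases, gr.Text is an alias of gr.Textbox, so the choice of output component is not what makes this stream; streaming comes from fn yielding. A hedged sketch of the same wiring, using the plain-generator stream_solution from the sketch above:

    stream_interface = gr.Interface(
        fn=stream_solution,  # generator function from the sketch above
        inputs=[gr.Textbox(label="Problem Instruction"), gr.Textbox(label="JWT Token")],
        outputs=gr.Textbox(label="Solution"),  # gr.Text() behaves the same
        title="Stream Solution API",
        description="Provide a LeetCode problem instruction and a valid JWT token to stream a solution.",
    )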
@@ -193,5 +190,19 @@ demo = gr.TabbedInterface(
     ["Generate Solution", "Stream Solution", "Random Problem"]
 )
 
+# Run the Gradio app
+async def run(interface):
+    async with gr.Gradio().launch(interface) as app:
+        while True:  # Continuous loop for handling new requests
+            instruction = await app.textboxes.get("Problem Instruction")
+            token = await app.textboxes.get("JWT Token")
+
+            try:
+                async for generated_token in await stream_solution(instruction, token):
+                    await app.text.write(generated_token)  # Update text box with each token
+            except Exception as e:
+                await app.text.write(f"Error: {e}")
+
 if __name__ == "__main__":
-
+    loop = asyncio.get_event_loop()
+    loop.run_until_complete(run(demo))
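For reference, Gradio's launcher blocks and serves requests on its own event loop, so a conventional entry point does not need a hand-rolled asyncio runner. A minimal sketch, assuming Gradio 3.x, where generator (streaming) outputs require the request queue:

    if __name__ == "__main__":
        demo.queue()   # required in Gradio 3.x for streamed (generator) outputs
        demo.launch()

With this pattern Gradio calls stream_solution once per request, so no polling loop over the textboxes is needed.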