Spaces:
Sleeping
Sleeping
LeetMonkey in action: add streaming solution generation to the Gradio app
Browse files
app.py
CHANGED
|
@@ -128,6 +128,38 @@ def update_solution(problem, model_name):
|
|
| 128 |
logger.info("Solution generated successfully")
|
| 129 |
return formatted_code
|
| 130 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 131 |
with gr.Blocks() as demo:
|
| 132 |
gr.Markdown("# LeetCode Problem Solver")
|
| 133 |
|
|
@@ -142,7 +174,7 @@ with gr.Blocks() as demo:
|
|
| 142 |
generate_btn = gr.Button("Generate Solution")
|
| 143 |
|
| 144 |
select_problem_btn.click(select_random_problem, outputs=problem_display)
|
| 145 |
-
generate_btn.click(
|
| 146 |
|
| 147 |
if __name__ == "__main__":
|
| 148 |
logger.info("Starting Gradio interface")
|
|
|
|
| 128 |
logger.info("Solution generated successfully")
|
| 129 |
return formatted_code
|
| 130 |
|
| 131 |
+
def stream_solution(problem, model_name):
|
| 132 |
+
if model_name == "Q8_0 (8-bit)":
|
| 133 |
+
model = llm
|
| 134 |
+
else:
|
| 135 |
+
model_path = download_model(gguf_models[model_name])
|
| 136 |
+
model = Llama(model_path=model_path, n_ctx=2048, n_threads=4, n_gpu_layers=0, verbose=False)
|
| 137 |
+
|
| 138 |
+
logger.info(f"Generating solution using {model_name} model")
|
| 139 |
+
system_prompt = "You are a Python coding assistant specialized in solving LeetCode problems. Provide only the complete implementation of the given function. Ensure proper indentation and formatting. Do not include any explanations or multiple solutions."
|
| 140 |
+
full_prompt = f"""### Instruction:
|
| 141 |
+
{system_prompt}
|
| 142 |
+
|
| 143 |
+
Implement the following function for the LeetCode problem:
|
| 144 |
+
|
| 145 |
+
{problem}
|
| 146 |
+
|
| 147 |
+
### Response:
|
| 148 |
+
Here's the complete Python function implementation:
|
| 149 |
+
|
| 150 |
+
```python
|
| 151 |
+
"""
|
| 152 |
+
|
| 153 |
+
generated_text = ""
|
| 154 |
+
for chunk in model(full_prompt, stream=True, **generation_kwargs):
|
| 155 |
+
token = chunk["choices"]["text"]
|
| 156 |
+
generated_text += token
|
| 157 |
+
yield generated_text
|
| 158 |
+
|
| 159 |
+
formatted_code = extract_and_format_code(generated_text)
|
| 160 |
+
logger.info("Solution generated successfully")
|
| 161 |
+
yield formatted_code
|
| 162 |
+
|
| 163 |
with gr.Blocks() as demo:
|
| 164 |
gr.Markdown("# LeetCode Problem Solver")
|
| 165 |
|
|
|
|
| 174 |
generate_btn = gr.Button("Generate Solution")
|
| 175 |
|
| 176 |
select_problem_btn.click(select_random_problem, outputs=problem_display)
|
| 177 |
+
generate_btn.click(stream_solution, inputs=[problem_display, model_dropdown], outputs=solution_display)
|
| 178 |
|
| 179 |
if __name__ == "__main__":
|
| 180 |
logger.info("Starting Gradio interface")
|