sugiv committed on
Commit beaa004 · 1 Parent(s): f08b4ba

Adding and testing explain solution

Files changed (1): app.py (+28 -38)
app.py CHANGED
@@ -11,7 +11,6 @@ import autopep8
 import textwrap
 
 from datasets import load_dataset
-from fastapi.responses import StreamingResponse
 import random
 import asyncio
 
@@ -42,6 +41,8 @@ model_path = hf_hub_download(repo_id=REPO_ID, filename=MODEL_NAME, cache_dir="./
 llm = Llama(model_path=model_path, n_ctx=1024, n_threads=8, n_gpu_layers=-1, verbose=False, mlock=True)
 logger.info("8-bit model loaded successfully")
 
+user_data = {}
+
 # Generation parameters
 generation_kwargs = {
     "max_tokens": 512,
@@ -90,36 +91,39 @@ def extract_and_format_code(text):
         return autopep8.fix_code(formatted_code)
     except:
         return formatted_code
-
-def generate_solution(instruction: str, token: str) -> Dict[str, Any]:
+
+def generate_explanation(problem: str, solution: str, token: str) -> Dict[str, Any]:
     if not verify_token(token):
         return {"error": "Invalid token"}
 
-    system_prompt = "You are a Python coding assistant specialized in solving LeetCode problems. Provide only the complete implementation of the given function. Ensure proper indentation and formatting. Do not include any explanations or multiple solutions."
+    system_prompt = "You are a Python coding assistant specialized in explaining LeetCode problem solutions. Provide a clear and concise explanation of the given solution."
     full_prompt = f"""### Instruction:
 {system_prompt}
 
-Implement the following function for the LeetCode problem:
-{instruction}
+Problem:
+{problem}
+
+Solution:
+{solution}
+
+Explain this solution step by step.
 
 ### Response:
-Here's the complete Python function implementation:
+Here's the explanation of the solution:
 
-```python
 """
 
     generated_text = ""
     for chunk in llm(full_prompt, stream=True, **generation_kwargs):
         generated_text += chunk["choices"][0]["text"]
 
-    formatted_code = extract_and_format_code(generated_text)
-    return {"solution": formatted_code}
+    return {"explanation": generated_text}
 
-async def stream_solution(instruction: str, token: str):
-    if not verify_token(token):
-        raise Exception("Invalid token")
+def generate_solution(instruction: str, token: str) -> Dict[str, Any]:
+    if not verify_token(token):
+        return {"error": "Invalid token"}
 
     system_prompt = "You are a Python coding assistant specialized in solving LeetCode problems. Provide only the complete implementation of the given function. Ensure proper indentation and formatting. Do not include any explanations or multiple solutions."
     full_prompt = f"""### Instruction:
 {system_prompt}
@@ -133,20 +137,14 @@ Here's the complete Python function implementation:
 
 ```python
 """
-    async def generate():
-        generated_text = ""
-        try:
-            for chunk in llm(full_prompt, stream=True, **generation_kwargs):
-                token = chunk["choices"][0]["text"]
-                generated_text += token
-                logger.info(f"Generated text: {generated_text}")
-                yield token  # Yield individual tokens for streaming
-        except Exception as e:
-            logger.error(f"Error generating solution: {e}")
-            yield {"error": "Error generating solution"}
-
-    async for token in generate():
-        yield token
+
+    generated_text = ""
+    for chunk in llm(full_prompt, stream=True, **generation_kwargs):
+        generated_text += chunk["choices"][0]["text"]
+
+    formatted_code = extract_and_format_code(generated_text)
+    user_data[token] = {"problem": instruction, "solution": formatted_code}
+    return {"solution": formatted_code}
 
 def random_problem(token: str) -> Dict[str, Any]:
     if not verify_token(token):
@@ -157,6 +155,7 @@ def random_problem(token: str) -> Dict[str, Any]:
 
     # Extract the instruction (problem statement) from the randomly selected item
     problem = random_item['instruction']
+    user_data[token] = {"problem": problem, "solution": None}
 
     return {"problem": problem}
 
@@ -169,14 +168,6 @@ generate_interface = gr.Interface(
     description="Provide a LeetCode problem instruction and a valid JWT token to generate a solution."
 )
 
-stream_interface = gr.Interface(
-    fn=stream_solution,
-    inputs=[gr.Textbox(label="Problem Instruction"), gr.Textbox(label="JWT Token")],
-    outputs=gr.Text(),
-    title="Stream Solution API",
-    description="Provide a LeetCode problem instruction and a valid JWT token to stream a solution."
-)
-
 random_problem_interface = gr.Interface(
     fn=random_problem,
     inputs=gr.Textbox(label="JWT Token"),
@@ -185,10 +176,9 @@ random_problem_interface = gr.Interface(
     description="Provide a valid JWT token to get a random LeetCode problem."
 )
 
-# Combine interfaces
 demo = gr.TabbedInterface(
-    [generate_interface, stream_interface, random_problem_interface],
-    ["Generate Solution", "Stream Solution", "Random Problem"]
+    [generate_interface, explain_interface, random_problem_interface],
+    ["Generate Solution", "Explain Solution", "Random Problem"]
 )
 
 # Launch the Gradio app
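
The updated gr.TabbedInterface references an explain_interface, but this diff does not show its definition. Below is a minimal sketch of how that tab could be wired to the new generate_explanation function, assuming it mirrors the existing generate_interface; the labels, title, and output component are assumptions, not part of this commit.

import gradio as gr  # already imported in app.py

# Hypothetical sketch only: the commit does not show explain_interface,
# so everything below is modeled on the existing generate_interface.
explain_interface = gr.Interface(
    fn=generate_explanation,              # new function added in this commit
    inputs=[
        gr.Textbox(label="Problem"),      # forwarded as `problem`
        gr.Textbox(label="Solution"),     # forwarded as `solution`
        gr.Textbox(label="JWT Token"),    # checked by verify_token()
    ],
    outputs=gr.JSON(),                    # generate_explanation returns a dict
    title="Explain Solution API",
    description="Provide a problem, its solution, and a valid JWT token to get an explanation."
)

Whatever the actual definition looks like, it has to appear before the gr.TabbedInterface call that references it.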