IliaLarchenko committed
Commit 56bd1e9 · 1 Parent(s): bbb0e13

Comments and docstrings improvement

Files changed (1): api/llm.py (+166 −31)
api/llm.py CHANGED
@@ -2,17 +2,30 @@ import os
 from openai import OpenAI
 import anthropic
 from utils.errors import APIError
-from typing import List, Dict, Generator, Optional, Tuple
+from typing import List, Dict, Generator, Optional, Tuple, Any
+import logging
 
 
 class PromptManager:
     def __init__(self, prompts: Dict[str, str]):
-        self.prompts = prompts
-        self.limit = os.getenv("DEMO_WORD_LIMIT")
+        """
+        Initialize the PromptManager.
+
+        Args:
+            prompts (Dict[str, str]): A dictionary of prompt keys and their corresponding text.
+        """
+        self.prompts: Dict[str, str] = prompts
+        self.limit: Optional[str] = os.getenv("DEMO_WORD_LIMIT")
 
     def add_limit(self, prompt: str) -> str:
         """
         Add word limit to the prompt if specified in the environment variables.
+
+        Args:
+            prompt (str): The original prompt.
+
+        Returns:
+            str: The prompt with added word limit if applicable.
         """
         if self.limit:
             prompt += f" Keep your responses very short and simple, no more than {self.limit} words."
@@ -21,6 +34,15 @@ class PromptManager:
     def get_system_prompt(self, key: str) -> str:
         """
         Retrieve and limit a system prompt by its key.
+
+        Args:
+            key (str): The key for the desired prompt.
+
+        Returns:
+            str: The retrieved prompt with added word limit if applicable.
+
+        Raises:
+            KeyError: If the key is not found in the prompts dictionary.
         """
        prompt = self.prompts[key]
         return self.add_limit(prompt)
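
These two hunks pin down the PromptManager contract: prompts are stored by key, get_system_prompt raises KeyError for unknown keys, and add_limit appends a word cap whenever DEMO_WORD_LIMIT is set. A minimal usage sketch (the prompt key is made up for illustration, and PromptManager is assumed importable from this module):

```python
import os

os.environ["DEMO_WORD_LIMIT"] = "50"  # must be set before PromptManager is constructed

# assumes: from api.llm import PromptManager
pm = PromptManager({"coding_interviewer_prompt": "You are a strict coding interviewer."})
print(pm.get_system_prompt("coding_interviewer_prompt"))
# -> "You are a strict coding interviewer. Keep your responses very short and simple, no more than 50 words."

# pm.get_system_prompt("unknown_key")  # would raise KeyError, as the new docstring documents
```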
@@ -30,13 +52,29 @@ class PromptManager:
     ) -> str:
         """
         Create a problem requirements prompt with optional parameters.
+
+        Args:
+            type (str): The type of problem.
+            difficulty (Optional[str]): The difficulty level of the problem.
+            topic (Optional[str]): The topic of the problem.
+            requirements (Optional[str]): Additional requirements for the problem.
+
+        Returns:
+            str: The constructed problem requirements prompt.
         """
         prompt = f"Create a {type} problem. Difficulty: {difficulty}. Topic: {topic}. Additional requirements: {requirements}."
         return self.add_limit(prompt)
 
 
 class LLMManager:
-    def __init__(self, config, prompts: Dict[str, str]):
+    def __init__(self, config: Any, prompts: Dict[str, str]):
+        """
+        Initialize the LLMManager.
+
+        Args:
+            config (Any): Configuration object containing LLM settings.
+            prompts (Dict[str, str]): A dictionary of prompts for the PromptManager.
+        """
         self.config = config
         self.llm_type = config.llm.type
         if self.llm_type == "ANTHROPIC_API":
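
The requirements prompt is a plain f-string template, so its output can be checked directly; a worked example, assuming DEMO_WORD_LIMIT is unset:

```python
pm = PromptManager({})
print(pm.get_problem_requirements_prompt("coding", "medium", "graphs", "must use BFS"))
# -> "Create a coding problem. Difficulty: medium. Topic: graphs. Additional requirements: must use BFS."
```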
@@ -53,18 +91,38 @@ class LLMManager:
     def get_text(self, messages: List[Dict[str, str]], stream: Optional[bool] = None) -> Generator[str, None, None]:
         """
         Generate text from the LLM, optionally streaming the response.
+
+        Args:
+            messages (List[Dict[str, str]]): List of message dictionaries.
+            stream (Optional[bool]): Whether to stream the response. Defaults to self.streaming if not provided.
+
+        Yields:
+            str: Generated text chunks.
+
+        Raises:
+            APIError: If an unexpected error occurs during text generation.
         """
         if stream is None:
             stream = self.streaming
         try:
             if self.llm_type == "OPENAI_API":
-                return self._get_text_openai(messages, stream)
+                yield from self._get_text_openai(messages, stream)
             elif self.llm_type == "ANTHROPIC_API":
-                return self._get_text_anthropic(messages, stream)
+                yield from self._get_text_anthropic(messages, stream)
         except Exception as e:
             raise APIError(f"LLM Get Text Error: Unexpected error: {e}")
 
     def _get_text_openai(self, messages: List[Dict[str, str]], stream: bool) -> Generator[str, None, None]:
+        """
+        Generate text using OpenAI API.
+
+        Args:
+            messages (List[Dict[str, str]]): List of message dictionaries.
+            stream (bool): Whether to stream the response.
+
+        Yields:
+            str: Generated text chunks.
+        """
         if not stream:
             response = self.client.chat.completions.create(model=self.config.llm.name, messages=messages, temperature=1, max_tokens=2000)
             yield response.choices[0].message.content.strip()
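
The switch from `return` to `yield from` is the one behavioral fix in this hunk: with `return`, get_text handed the sub-generator back before any tokens were produced, so the surrounding try/except could never catch errors raised while the caller iterates; `yield from` keeps the frame suspended inside the try for the whole consumption. A standalone illustration (not code from this repo):

```python
def inner():
    yield "ok"
    raise RuntimeError("boom")

def old_style():
    try:
        return inner()  # hands back the generator; nothing has executed yet
    except RuntimeError:
        return iter(["caught"])

def new_style():
    try:
        yield from inner()  # suspended inside the try while being consumed
    except RuntimeError:
        yield "caught"

try:
    list(old_style())
except RuntimeError as e:
    print(f"old style leaked: {e}")  # old style leaked: boom
print(list(new_style()))  # ['ok', 'caught']
```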
@@ -77,9 +135,39 @@ class LLMManager:
                     yield chunk.choices[0].delta.content
 
     def _get_text_anthropic(self, messages: List[Dict[str, str]], stream: bool) -> Generator[str, None, None]:
-        # I convert the messages every time to the Anthropics format
-        # It is not optimal way to do it, we can instead support the messages format from the beginning
-        # But it duplicates the code and I don't want to do it now
+        """
+        Generate text using Anthropic API.
+
+        Args:
+            messages (List[Dict[str, str]]): List of message dictionaries.
+            stream (bool): Whether to stream the response.
+
+        Yields:
+            str: Generated text chunks.
+        """
+        system_message, consolidated_messages = self._prepare_anthropic_messages(messages)
+
+        if not stream:
+            response = self.client.messages.create(
+                model=self.config.llm.name, max_tokens=2000, temperature=1, system=system_message, messages=consolidated_messages
+            )
+            yield response.content[0].text
+        else:
+            with self.client.messages.stream(
+                model=self.config.llm.name, max_tokens=2000, temperature=1, system=system_message, messages=consolidated_messages
+            ) as stream:
+                yield from stream.text_stream
+
+    def _prepare_anthropic_messages(self, messages: List[Dict[str, str]]) -> Tuple[Optional[str], List[Dict[str, str]]]:
+        """
+        Prepare messages for Anthropic API format.
+
+        Args:
+            messages (List[Dict[str, str]]): Original messages in OpenAI format.
+
+        Returns:
+            Tuple[Optional[str], List[Dict[str, str]]]: Tuple containing system message and consolidated messages.
+        """
         system_message = None
         consolidated_messages = []
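
Extracting _prepare_anthropic_messages replaces the apologetic inline comments with a named, documented step. Its full body falls outside this diff; the sketch below shows the kind of conversion such a helper performs, since Anthropic's Messages API takes the system prompt as a separate parameter and expects alternating roles (the merging rule for consecutive same-role turns is an assumption):

```python
from typing import Dict, List, Optional, Tuple

def prepare_anthropic_messages_sketch(messages: List[Dict[str, str]]) -> Tuple[Optional[str], List[Dict[str, str]]]:
    system_message: Optional[str] = None
    consolidated: List[Dict[str, str]] = []
    for message in messages:
        if message["role"] == "system":
            system_message = message["content"]  # passed via the separate `system` parameter
        elif consolidated and consolidated[-1]["role"] == message["role"]:
            consolidated[-1]["content"] += "\n" + message["content"]  # merge same-role runs
        else:
            consolidated.append(message.copy())
    return system_message, consolidated

system, msgs = prepare_anthropic_messages_sketch([
    {"role": "system", "content": "Be terse."},
    {"role": "user", "content": "Hi!"},
    {"role": "user", "content": "Ping!"},
])
print(system)  # Be terse.
print(msgs)    # [{'role': 'user', 'content': 'Hi!\nPing!'}]
```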
 
@@ -95,39 +183,43 @@ class LLMManager:
             else:
                 consolidated_messages.append(message.copy())
 
-        if not stream:
-            response = self.client.messages.create(
-                model=self.config.llm.name, max_tokens=2000, temperature=1, system=system_message, messages=consolidated_messages
-            )
-            yield response.content[0].text
-        else:
-            with self.client.messages.stream(
-                model=self.config.llm.name, max_tokens=2000, temperature=1, system=system_message, messages=consolidated_messages
-            ) as stream:
-                yield from stream.text_stream
+        return system_message, consolidated_messages
 
-    def test_llm(self, stream=False) -> bool:
+    def test_llm(self, stream: bool = False) -> bool:
         """
         Test the LLM connection with or without streaming.
+
+        Args:
+            stream (bool): Whether to test streaming functionality.
+
+        Returns:
+            bool: True if the test is successful, False otherwise.
         """
         try:
-            list(
-                self.get_text(
-                    [
-                        {"role": "system", "content": "You just help me test the connection."},
-                        {"role": "user", "content": "Hi!"},
-                        {"role": "user", "content": "Ping!"},
-                    ],
-                    stream=stream,
-                )
-            )
+            test_messages = [
+                {"role": "system", "content": "You just help me test the connection."},
+                {"role": "user", "content": "Hi!"},
+                {"role": "user", "content": "Ping!"},
+            ]
+            list(self.get_text(test_messages, stream=stream))
             return True
-        except:
+        except APIError as e:
+            logging.error(f"LLM test failed: {e}")
+            return False
+        except Exception as e:
+            logging.error(f"Unexpected error during LLM test: {e}")
            return False
 
     def init_bot(self, problem: str, interview_type: str = "coding") -> List[Dict[str, str]]:
         """
         Initialize the bot with a system prompt and problem description.
+
+        Args:
+            problem (str): The problem description.
+            interview_type (str): The type of interview. Defaults to "coding".
+
+        Returns:
+            List[Dict[str, str]]: Initial messages for the bot.
         """
         system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_interviewer_prompt")
         return [{"role": "system", "content": f"{system_prompt}\nThe candidate is solving the following problem:\n {problem}"}]
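
With the bare `except:` split into logged APIError and generic branches, failed health checks now leave a trace instead of vanishing silently. A hedged usage sketch (`config` and `prompts` come from the app's own setup, which is outside this diff):

```python
import logging
logging.basicConfig(level=logging.INFO)

llm = LLMManager(config, prompts)  # config/prompts as loaded elsewhere in the app
if not llm.test_llm(stream=True):
    raise SystemExit("LLM connection test failed; see the logged error for details")
```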
@@ -135,6 +227,15 @@ class LLMManager:
     def get_problem_prepare_messages(self, requirements: str, difficulty: str, topic: str, interview_type: str) -> List[Dict[str, str]]:
         """
         Prepare messages for generating a problem based on given requirements.
+
+        Args:
+            requirements (str): Specific requirements for the problem.
+            difficulty (str): Difficulty level of the problem.
+            topic (str): Topic of the problem.
+            interview_type (str): Type of interview.
+
+        Returns:
+            List[Dict[str, str]]: Prepared messages for problem generation.
         """
         system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_problem_generation_prompt")
         full_prompt = self.prompt_manager.get_problem_requirements_prompt(interview_type, difficulty, topic, requirements)
@@ -146,6 +247,15 @@ class LLMManager:
     def get_problem(self, requirements: str, difficulty: str, topic: str, interview_type: str) -> Generator[str, None, None]:
         """
         Get a problem from the LLM based on the given requirements, difficulty, and topic.
+
+        Args:
+            requirements (str): Specific requirements for the problem.
+            difficulty (str): Difficulty level of the problem.
+            topic (str): Topic of the problem.
+            interview_type (str): Type of interview.
+
+        Yields:
+            str: Incrementally generated problem statement.
         """
         messages = self.get_problem_prepare_messages(requirements, difficulty, topic, interview_type)
         problem = ""
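
Consuming get_problem is a matter of draining the generator; judging by the `problem = ""` accumulator and the "incrementally generated problem statement" wording, each yield appears to carry the text so far (an assumption, since the loop body is outside this diff):

```python
problem = ""
for problem in llm.get_problem("must use BFS", "medium", "graphs", "coding"):
    pass  # assumed: each item is the accumulated problem text so far
print(problem)  # the complete generated problem statement
```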
@@ -158,6 +268,15 @@ class LLMManager:
     ) -> List[Dict[str, str]]:
         """
         Update chat history with the latest user message and code.
+
+        Args:
+            code (str): Current code.
+            previous_code (str): Previous code.
+            chat_history (List[Dict[str, str]]): Current chat history.
+            chat_display (List[List[Optional[str]]]): Current chat display.
+
+        Returns:
+            List[Dict[str, str]]: Updated chat history.
         """
         message = chat_display[-1][0]
         if code != previous_code:
@@ -170,6 +289,14 @@ class LLMManager:
     ) -> List[Dict[str, str]]:
         """
         Prepare messages to end the interview and generate feedback.
+
+        Args:
+            problem_description (str): The original problem description.
+            chat_history (List[Dict[str, str]]): The chat history.
+            interview_type (str): The type of interview.
+
+        Returns:
+            List[Dict[str, str]]: Prepared messages for generating feedback.
         """
         transcript = [f"{message['role'].capitalize()}: {message['content']}" for message in chat_history[1:]]
         system_prompt = self.prompt_manager.get_system_prompt(f"{interview_type}_grading_feedback_prompt")
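
The transcript line format is fully visible above, so its output can be shown concretely:

```python
chat_history = [
    {"role": "system", "content": "..."},  # dropped by chat_history[1:]
    {"role": "assistant", "content": "Hello! Ready to start?"},
    {"role": "user", "content": "def reverse(head): ..."},
]
transcript = [f"{m['role'].capitalize()}: {m['content']}" for m in chat_history[1:]]
print(transcript)
# ['Assistant: Hello! Ready to start?', 'User: def reverse(head): ...']
```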
@@ -185,6 +312,14 @@ class LLMManager:
     ) -> Generator[str, None, None]:
         """
         End the interview and get feedback from the LLM.
+
+        Args:
+            problem_description (str): The original problem description.
+            chat_history (List[Dict[str, str]]): The chat history.
+            interview_type (str): The type of interview. Defaults to "coding".
+
+        Yields:
+            str: Incrementally generated feedback.
         """
         if len(chat_history) <= 2:
             yield "No interview history available"
 