Ali2206 committed on
Commit bfcd4eb · verified · 1 Parent(s): 32e4e6a

Update src/txagent/txagent.py

Files changed (1): src/txagent/txagent.py (+100, −379)
src/txagent/txagent.py CHANGED
@@ -5,7 +5,7 @@ import gc
 import numpy as np
 from vllm import LLM, SamplingParams
 from jinja2 import Template
- from typing import List, Dict, Optional, Union, Generator
+ from typing import List, Dict, Optional, Union, Tuple, Generator
 import types
 from tooluniverse import ToolUniverse
 from .toolrag import ToolRAGModel
@@ -40,25 +40,6 @@ class TxAgent:
                  additional_default_tools: Optional[List] = None):
         """
         Initialize the TxAgent with specified configuration.
-
-        Args:
-            model_name: Name of the main LLM model
-            rag_model_name: Name of the RAG model
-            tool_files_dict: Dictionary of tool files
-            enable_finish: Whether to enable the Finish tool
-            enable_rag: Whether to enable RAG functionality
-            enable_summary: Whether to enable summarization
-            init_rag_num: Initial number of RAG tools to retrieve
-            step_rag_num: Number of RAG tools to retrieve per step
-            summary_mode: Mode for summarization ('step' or 'length')
-            summary_skip_last_k: Number of last steps to skip in summarization
-            summary_context_length: Context length threshold for summarization
-            force_finish: Whether to force finish when max rounds reached
-            avoid_repeat: Whether to avoid repeating similar responses
-            seed: Random seed for reproducibility
-            enable_checker: Whether to enable reasoning trace checker
-            enable_chat: Whether to enable chat mode
-            additional_default_tools: Additional tools to include by default
         """
         self.model_name = model_name
         self.tokenizer = None
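
Note: this hunk drops the constructor's Args documentation, so the options are now visible only as parameter names. For reference, a minimal instantiation sketch reconstructed from the removed parameter list; the model identifiers and option values below are illustrative placeholders, not taken from this commit:

    # Hedged sketch -- parameter names come from the removed Args block above;
    # model ids and values are placeholders for illustration only.
    agent = TxAgent(
        model_name="<main-llm-id>",       # main LLM model (placeholder id)
        rag_model_name="<rag-model-id>",  # RAG model (placeholder id)
        enable_finish=True,               # expose the Finish tool
        enable_rag=True,                  # retrieve tools via RAG
        enable_summary=False,             # summarize long tool results
        force_finish=True,                # force an answer at max rounds
        seed=100)                         # random seed for reproducibility
    agent.load_models()                   # loads self.model_name by default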
@@ -94,12 +75,6 @@ class TxAgent:
     def load_models(self, model_name: Optional[str] = None) -> str:
         """
         Load the specified model or the default model if none specified.
-
-        Args:
-            model_name: Name of the model to load
-
-        Returns:
-            Status message indicating success or failure
         """
         if model_name is not None:
             if model_name == self.model_name:
@@ -140,123 +115,97 @@ class TxAgent:
             logger.error("Failed to load tools: %s", str(e))
             raise RuntimeError(f"Failed to load tools: {str(e)}")

-    def load_tool_desc_embedding(self) -> None:
-        """Load tool description embeddings from cache or generate new ones."""
-        cache_path = os.path.join(os.path.dirname(self.tool_files_dict["new_tool"]), "tool_embeddings.pkl")
-        try:
-            if os.path.exists(cache_path):
-                self.rag_model.load_cached_embeddings(cache_path)
-            else:
-                self.rag_model.load_tool_desc_embedding(self.tooluniverse)
-                self.rag_model.save_embeddings(cache_path)
-            logger.info("Tool description embeddings loaded successfully")
-        except Exception as e:
-            logger.error("Failed to load tool embeddings: %s", str(e))
-            raise RuntimeError(f"Failed to load tool embeddings: {str(e)}")
-
-    def rag_infer(self, query: str, top_k: int = 5) -> List[str]:
-        """
-        Perform RAG inference to retrieve relevant tools.
-
-        Args:
-            query: The query to search for
-            top_k: Number of top results to return
-
-        Returns:
-            List of relevant tool names
-        """
-        if not self.enable_rag:
-            return []
-        return self.rag_model.rag_infer(query, top_k)
-
-    def initialize_conversation(self,
-                                message: str,
-                                conversation: Optional[List[Dict]] = None,
-                                history: Optional[List[Dict]] = None) -> List[Dict]:
+    def run_multistep_agent(self,
+                            message: str,
+                            temperature: float,
+                            max_new_tokens: int,
+                            max_token: int,
+                            max_round: int = 5,
+                            call_agent: bool = False,
+                            call_agent_level: int = 0) -> Optional[str]:
         """
-        Initialize or extend a conversation with the given message and history.
-
-        Args:
-            message: The new message to add
-            conversation: Existing conversation to extend
-            history: Chat history to incorporate
-
-        Returns:
-            Updated conversation list
+        Run multi-step reasoning with the agent.
         """
-        if conversation is None:
-            conversation = []
+        logger.info("Starting multistep agent for message: %s", message[:100])
+        picked_tools_prompt = []
+        call_agent_level = 0
+        if call_agent:
+            call_agent_level += 1
+            if call_agent_level >= 2:
+                call_agent = False

+        conversation = []
         conversation = self.set_system_prompt(conversation, self.prompt_multi_step)
-        if history:
-            for msg in history:
-                if msg['role'] == 'user':
-                    conversation.append({"role": "user", "content": msg['content']})
-                elif msg['role'] == 'assistant':
-                    conversation.append({"role": "assistant", "content": msg['content']})
         conversation.append({"role": "user", "content": message})
-        logger.debug("Conversation initialized with %d messages", len(conversation))
-        return conversation
-
-    def tool_RAG(self,
-                 message: Optional[str] = None,
-                 picked_tool_names: Optional[List[str]] = None,
-                 existing_tools_prompt: List = [],
-                 rag_num: int = 0,
-                 return_call_result: bool = False) -> Union[List, Tuple[List, List]]:
-        """
-        Retrieve relevant tools using RAG.

-        Args:
-            message: The query message for RAG
-            picked_tool_names: Pre-selected tool names
-            existing_tools_prompt: Existing tools to include
-            rag_num: Number of tools to retrieve
-            return_call_result: Whether to return tool names
-
-        Returns:
-            List of tool prompts or tuple with tool names if return_call_result is True
-        """
-        if not self.enable_rag:
-            return [] if not return_call_result else ([], [])
-
-        extra_factor = 10
-        if picked_tool_names is None:
-            if message is None:
-                raise ValueError("Either message or picked_tool_names must be provided")
-            picked_tool_names = self.rag_infer(message, top_k=rag_num * extra_factor)
+        outputs = []
+        last_outputs = []
+        next_round = True
+        current_round = 0
+        token_overflow = False
+        enable_summary = False
+        last_status = {}

-        picked_tool_names_no_special = [
-            tool for tool in picked_tool_names
-            if tool not in self.special_tools_name
-        ]
-        picked_tool_names = picked_tool_names_no_special[:rag_num]
+        while next_round and current_round < max_round:
+            current_round += 1
+            if len(outputs) > 0:
+                function_call_messages, picked_tools_prompt, special_tool_call = self.run_function_call(
+                    last_outputs,
+                    return_message=True,
+                    existing_tools_prompt=picked_tools_prompt,
+                    message_for_call_agent=message,
+                    call_agent=call_agent,
+                    call_agent_level=call_agent_level,
+                    temperature=temperature
+                )

-        picked_tools = self.tooluniverse.get_tool_by_name(picked_tool_names)
-        picked_tools_prompt = self.tooluniverse.prepare_tool_prompts(picked_tools)
-        logger.debug("Retrieved %d tools via RAG", len(picked_tools_prompt))
-
-        if return_call_result:
-            return picked_tools_prompt, picked_tool_names
-        return picked_tools_prompt
+                if special_tool_call == 'Finish':
+                    next_round = False
+                    conversation.extend(function_call_messages)
+                    content = function_call_messages[0]['content']
+                    if content is None:
+                        return "❌ No content returned after Finish tool call."
+                    return content.split('[FinalAnswer]')[-1]

-    def add_special_tools(self, tools: List, call_agent: bool = False) -> List:
-        """Add special tools (Finish and optionally CallAgent) to the tools list."""
-        if self.enable_finish:
-            tools.append(self.tooluniverse.get_one_tool_by_one_name('Finish', return_prompt=True))
-            logger.debug("Finish tool added")
-        if call_agent:
-            tools.append(self.tooluniverse.get_one_tool_by_one_name('CallAgent', return_prompt=True))
-            logger.debug("CallAgent tool added")
-        return tools
+                if (self.enable_summary or token_overflow) and not call_agent:
+                    enable_summary = True
+                last_status = self.function_result_summary(
+                    conversation, status=last_status, enable_summary=enable_summary)

-    def set_system_prompt(self, conversation: List[Dict], sys_prompt: str) -> List[Dict]:
-        """Set or update the system prompt in the conversation."""
-        if not conversation:
-            conversation.append({"role": "system", "content": sys_prompt})
-        else:
-            conversation[0] = {"role": "system", "content": sys_prompt}
-        return conversation
+                if function_call_messages:
+                    conversation.extend(function_call_messages)
+                    outputs.append(tool_result_format(function_call_messages))
+                else:
+                    next_round = False
+                    conversation.extend([{"role": "assistant", "content": ''.join(last_outputs)}])
+                    return ''.join(last_outputs).replace("</s>", "")
+
+            last_outputs = []
+            outputs.append("### TxAgent:\n")
+            last_outputs_str, token_overflow = self.llm_infer(
+                messages=conversation,
+                temperature=temperature,
+                tools=picked_tools_prompt,
+                skip_special_tokens=False,
+                max_new_tokens=2048,
+                max_token=131072,
+                check_token_status=True)
+
+            if last_outputs_str is None:
+                logger.warning("Token limit exceeded")
+                if self.force_finish:
+                    return self.get_answer_based_on_unfinished_reasoning(
+                        conversation, temperature, max_new_tokens, max_token)
+                return "❌ Token limit exceeded."
+
+            last_outputs.append(last_outputs_str)

+        if max_round == current_round:
+            logger.warning("Max rounds exceeded")
+            if self.force_finish:
+                return self.get_answer_based_on_unfinished_reasoning(
+                    conversation, temperature, max_new_tokens, max_token)
+        return None

     def run_function_call(self,
                           fcall_str: str,
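
The relocated run_multistep_agent above now inlines its setup (tool prompts, conversation, system prompt) rather than delegating to the removed initialize_conversation/tool_RAG helpers. A hedged usage sketch, reusing the agent instance sketched earlier; the query is a placeholder, and note that the max_new_tokens/max_token arguments only reach the force-finish path, since per-step generation is hard-coded to 2048/131072 inside the loop:

    # Illustrative call of the rewritten run_multistep_agent (placeholder query).
    answer = agent.run_multistep_agent(
        "Example clinical question",   # placeholder
        temperature=0.1,
        max_new_tokens=512,            # used only by the force-finish path
        max_token=131072,
        max_round=5,
        call_agent=False)              # sub-agent recursion off
    if answer is None:
        print("Max rounds reached without a final answer (force_finish off).")
    else:
        print(answer)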
@@ -268,18 +217,6 @@ class TxAgent:
                           temperature: Optional[float] = None) -> Tuple[List[Dict], List, str]:
         """
         Execute function calls from the model's output.
-
-        Args:
-            fcall_str: The function call string from the model
-            return_message: Whether to return the message part
-            existing_tools_prompt: Existing tools to consider
-            message_for_call_agent: Original message for CallAgent
-            call_agent: Whether CallAgent is enabled
-            call_agent_level: Current CallAgent level
-            temperature: Temperature for sub-agent calls
-
-        Returns:
-            Tuple of (revised_messages, tools_prompt, special_tool_call)
         """
         try:
             function_call_json, message = self.tooluniverse.extract_function_call_json(
@@ -304,11 +241,15 @@ class TxAgent:
                     full_message = (
                         (message_for_call_agent or "") +
                         "\nYou must follow the following plan to answer the question: " +
-                        str(solution_plan)
+                        str(solution_plan))
                     call_result = self.run_multistep_agent(
-                        full_message, temperature=temperature,
-                        max_new_tokens=512, max_token=131072,
-                        call_agent=False, call_agent_level=call_agent_level)
+                        full_message,
+                        temperature=temperature,
+                        max_new_tokens=512,
+                        max_token=131072,
+                        call_agent=False,
+                        call_agent_level=call_agent_level
+                    )
                     if call_result is None:
                         call_result = "⚠️ No content returned from sub-agent."
                     else:
@@ -317,6 +258,7 @@ class TxAgent:
                     call_result = "Error: CallAgent disabled."
                 else:
                     call_result = self.tooluniverse.run_one_function(function_call_json[i])
+
                 call_id = self.tooluniverse.call_id_gen()
                 function_call_json[i]["call_id"] = call_id
                 logger.info("Tool Call Result: %s", call_result)
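
For orientation, the messages assembled around this point have the shape below, reconstructed from the surrounding context lines; every field value is invented for illustration:

    import json

    # One assistant turn carrying the serialized calls, then one "tool"
    # turn per executed call (tool name and outputs are illustrative only).
    function_call_json = [{"name": "ExampleTool",        # hypothetical tool
                           "arguments": {"query": "..."},
                           "call_id": "call_0"}]
    revised_messages = [
        {"role": "assistant",
         "content": "Calling ExampleTool.",
         "tool_calls": json.dumps(function_call_json)},
        {"role": "tool",
         "content": json.dumps({"content": "example tool output",
                                "call_id": "call_0"})},
    ]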
@@ -328,45 +270,27 @@ class TxAgent:
                         "call_id": call_id
                     })
                 })
-            else:
-                call_results.append({
-                    "role": "tool",
-                    "content": json.dumps({"content": "Invalid or no function call detected."})
-                })

         revised_messages = [{
             "role": "assistant",
             "content": message.strip(),
             "tool_calls": json.dumps(function_call_json)
         }] + call_results
+
         return revised_messages, existing_tools_prompt or [], special_tool_call

     def llm_infer(self,
-                  messages: List[Dict],
-                  temperature: float = 0.1,
-                  tools: Optional[List] = None,
-                  output_begin_string: Optional[str] = None,
-                  max_new_tokens: int = 512,
-                  max_token: int = 131072,
-                  skip_special_tokens: bool = True,
-                  model: Optional[LLM] = None,
-                  check_token_status: bool = False) -> Union[str, Tuple[str, bool]]:
+                  messages: List[Dict],
+                  temperature: float = 0.1,
+                  tools: Optional[List] = None,
+                  output_begin_string: Optional[str] = None,
+                  max_new_tokens: int = 512,
+                  max_token: int = 131072,
+                  skip_special_tokens: bool = True,
+                  model: Optional[LLM] = None,
+                  check_token_status: bool = False) -> Union[str, Tuple[str, bool]]:
         """
         Perform inference using the LLM.
-
-        Args:
-            messages: Conversation history
-            temperature: Sampling temperature
-            tools: List of tools to include
-            output_begin_string: Prefix for output
-            max_new_tokens: Maximum new tokens to generate
-            max_token: Maximum total tokens allowed
-            skip_special_tokens: Whether to skip special tokens
-            model: Optional custom model to use
-            check_token_status: Whether to check token limits
-
-        Returns:
-            Generated text or tuple with text and overflow flag if check_token_status
         """
         model = model or self.model
         tokenizer = self.tokenizer
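
Because llm_infer's return type depends on check_token_status, call sites unpack it in two ways; a short sketch with placeholder message content:

    # check_token_status=True returns (text, token_overflow); text is None
    # when the prompt exceeds max_token, which the agent loop relies on.
    text, overflow = agent.llm_infer(
        messages=[{"role": "user", "content": "placeholder"}],
        temperature=0.1,
        max_new_tokens=256,
        check_token_status=True)

    # Default check_token_status=False returns the generated text alone.
    text = agent.llm_infer(messages=[{"role": "user", "content": "placeholder"}])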
@@ -409,209 +333,6 @@ class TxAgent:
             logger.error("Inference failed: %s", str(e))
             raise RuntimeError(f"Inference failed: {str(e)}")

-    def run_multistep_agent(self,
-                            message: str,
-                            temperature: float,
-                            max_new_tokens: int,
-                            max_token: int,
-                            max_round: int = 5,
-                            call_agent: bool = False,
-                            call_agent_level: int = 0) -> Optional[str]:
-        """
-        Run multi-step reasoning with the agent.
-
-        Args:
-            message: Input message
-            temperature: Sampling temperature
-            max_new_tokens: Max new tokens per step
-            max_token: Max total tokens
-            max_round: Maximum reasoning rounds
-            call_agent: Whether to enable CallAgent
-            call_agent_level: Current CallAgent level
-
-        Returns:
-            Final answer or None if failed
-        """
-        logger.info("Starting multistep agent for message: %s", message[:100])
-        picked_tools_prompt, call_agent_level = self.initialize_tools_prompt(
-            call_agent, call_agent_level, message)
-        conversation = self.initialize_conversation(message)
-        outputs = []
-        last_outputs = []
-        next_round = True
-        current_round = 0
-        token_overflow = False
-        enable_summary = False
-        last_status = {}
-
-        while next_round and current_round < max_round:
-            current_round += 1
-            if len(outputs) > 0:
-                function_call_messages, picked_tools_prompt, special_tool_call = self.run_function_call(
-                    last_outputs, return_message=True,
-                    existing_tools_prompt=picked_tools_prompt,
-                    message_for_call_agent=message,
-                    call_agent=call_agent,
-                    call_agent_level=call_agent_level,
-                    temperature=temperature)
-
-                if special_tool_call == 'Finish':
-                    next_round = False
-                    conversation.extend(function_call_messages)
-                    content = function_call_messages[0]['content']
-                    if content is None:
-                        return "❌ No content returned after Finish tool call."
-                    return content.split('[FinalAnswer]')[-1]
-
-                if (self.enable_summary or token_overflow) and not call_agent:
-                    enable_summary = True
-                last_status = self.function_result_summary(
-                    conversation, status=last_status, enable_summary=enable_summary)
-
-                if function_call_messages:
-                    conversation.extend(function_call_messages)
-                    outputs.append(tool_result_format(function_call_messages))
-                else:
-                    next_round = False
-                    conversation.extend([{"role": "assistant", "content": ''.join(last_outputs)}])
-                    return ''.join(last_outputs).replace("</s>", "")
-
-            last_outputs = []
-            outputs.append("### TxAgent:\n")
-            last_outputs_str, token_overflow = self.llm_infer(
-                messages=conversation,
-                temperature=temperature,
-                tools=picked_tools_prompt,
-                skip_special_tokens=False,
-                max_new_tokens=2048,
-                max_token=131072,
-                check_token_status=True)
-            if last_outputs_str is None:
-                logger.warning("Token limit exceeded")
-                if self.force_finish:
-                    return self.get_answer_based_on_unfinished_reasoning(
-                        conversation, temperature, max_new_tokens, max_token)
-                return "❌ Token limit exceeded."
-            last_outputs.append(last_outputs_str)
-
-        if max_round == current_round:
-            logger.warning("Max rounds exceeded")
-            if self.force_finish:
-                return self.get_answer_based_on_unfinished_reasoning(
-                    conversation, temperature, max_new_tokens, max_token)
-        return None
-
-    def analyze_document(self,
-                         file_path: str,
-                         temperature: float = 0.1,
-                         max_new_tokens: int = 2048,
-                         max_token: int = 131072) -> Dict[str, Union[str, List]]:
-        """
-        Analyze a document and return structured results.
-
-        Args:
-            file_path: Path to the document
-            temperature: Sampling temperature
-            max_new_tokens: Max new tokens per step
-            max_token: Max total tokens
-
-        Returns:
-            Dictionary with analysis results
-        """
-        logger.info("Starting document analysis for: %s", file_path)
-        start_time = time.time()
-
-        try:
-            extracted_text = self.extract_text(file_path)
-            if not extracted_text:
-                raise ValueError("Could not extract text from document")
-
-            chunks = self.split_text(extracted_text)
-            batches = self.batch_chunks(chunks, batch_size=1)
-            batch_results = []
-
-            for batch in batches:
-                prompt = "\n\n".join(self.build_prompt(chunk) for chunk in batch)
-                response = self.run_multistep_agent(
-                    prompt,
-                    temperature=temperature,
-                    max_new_tokens=max_new_tokens,
-                    max_token=max_token,
-                    call_agent=False
-                )
-                batch_results.append(self.clean_response(response or "No response"))
-
-            combined = "\n\n".join([res for res in batch_results if not res.startswith("❌")])
-            if not combined:
-                raise ValueError("No valid batch responses generated")
-
-            final_summary = self.generate_final_summary(self, combined)
-
-            return {
-                "status": "success",
-                "summary": final_summary,
-                "batch_results": batch_results,
-                "processing_time": time.time() - start_time
-            }
-
-        except Exception as e:
-            logger.error("Document analysis failed: %s", str(e))
-            return {
-                "status": "error",
-                "message": str(e),
-                "processing_time": time.time() - start_time
-            }
-
-    def get_answer_based_on_unfinished_reasoning(self,
-                                                 conversation: List[Dict],
-                                                 temperature: float,
-                                                 max_new_tokens: int,
-                                                 max_token: int) -> str:
-        """
-        Generate a final answer when reasoning is incomplete.
-
-        Args:
-            conversation: Current conversation history
-            temperature: Sampling temperature
-            max_new_tokens: Max new tokens
-            max_token: Max total tokens
-
-        Returns:
-            Final answer string
-        """
-        if conversation[-1]['role'] == 'assistant':
-            conversation.append(
-                {'role': 'tool', 'content': 'Errors occurred during function call; provide final answer with current information.'})
-        finish_tools_prompt = self.add_finish_tools([])
-        last_outputs_str = self.llm_infer(
-            messages=conversation,
-            temperature=temperature,
-            tools=finish_tools_prompt,
-            output_begin_string='[FinalAnswer]',
-            skip_special_tokens=True,
-            max_new_tokens=max_new_tokens,
-            max_token=max_token)
-        logger.info("Unfinished reasoning answer: %s", last_outputs_str[:100])
-        return last_outputs_str
-
-    def update_parameters(self, **kwargs) -> Dict:
-        """
-        Update agent parameters dynamically.
-
-        Args:
-            kwargs: Parameter names and values to update
-
-        Returns:
-            Dictionary of updated parameters
-        """
-        updated_attributes = {}
-        for key, value in kwargs.items():
-            if hasattr(self, key):
-                setattr(self, key, value)
-                updated_attributes[key] = value
-        logger.info("Updated parameters: %s", updated_attributes)
-        return updated_attributes
-
     def cleanup(self) -> None:
         """Clean up resources and clear memory."""
         if hasattr(self, 'model'):
 