Shreyas094 committed (verified)
Commit 0e78477 · Parent(s): 2cb0f1d

Update app.py

Files changed (1): app.py (+21, -6)
app.py CHANGED
@@ -19,6 +19,7 @@ from llama_cpp_agent.tools import WebSearchTool
 from llama_cpp_agent.prompt_templates import web_search_system_prompt, research_system_prompt
 from langchain_community.llms import HuggingFaceHub
 from llama_cpp_agent.llm_output_settings import LlmStructuredOutputSettings, LlmStructuredOutputType
+from pydantic import BaseModel
 from llama_cpp_agent.llm_output_settings import LlmStructuredOutputType
 
 print("Available LlmStructuredOutputType options:")
@@ -100,6 +101,12 @@ examples = [
 ]
 
 
+class CustomLLMSettings(BaseModel):
+    structured_output: LlmStructuredOutputSettings
+    temperature: float
+    top_p: float
+    repetition_penalty: float
+
 class HuggingFaceHubWrapper:
     def __init__(self, repo_id, model_kwargs, huggingfacehub_api_token):
         self.model = HuggingFaceHub(
@@ -107,13 +114,22 @@ class HuggingFaceHubWrapper:
             model_kwargs=model_kwargs,
             huggingfacehub_api_token=huggingfacehub_api_token
         )
+        self.temperature = model_kwargs.get('temperature', 0.7)
+        self.top_p = model_kwargs.get('top_p', 0.95)
+        self.repetition_penalty = model_kwargs.get('repetition_penalty', 1.1)
+
 
     def get_provider_default_settings(self):
-        return LlmStructuredOutputSettings(
-            output_type=LlmStructuredOutputType.no_structured_output,
-            include_system_prompt=False,
-            include_user_prompt=False,
-            include_assistant_prompt=False,
+        return CustomLLMSettings(
+            structured_output=LlmStructuredOutputSettings(
+                output_type=LlmStructuredOutputType.no_structured_output,
+                include_system_prompt=False,
+                include_user_prompt=False,
+                include_assistant_prompt=False,
+            ),
+            temperature=self.temperature,
+            top_p=self.top_p,
+            repetition_penalty=self.repetition_penalty
        )
 
     def get_provider_identifier(self):
@@ -122,7 +138,6 @@ class HuggingFaceHubWrapper:
     def __call__(self, *args, **kwargs):
         return self.model(*args, **kwargs)
 
-    # Add any other methods that might be required by WebSearchTool
     def get_num_tokens(self, text):
         # This is a placeholder. You might need to implement a proper token counting method
         return len(text.split())
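
Below is a minimal usage sketch of the updated wrapper, assuming the rest of app.py instantiates it roughly as the constructor signature suggests. The repo_id, the model_kwargs values, and the environment-variable token lookup are illustrative assumptions, not values taken from this commit.

# Hypothetical usage sketch (not part of this commit): repo_id, the
# model_kwargs entries, and the token lookup are assumptions for illustration.
import os

llm = HuggingFaceHubWrapper(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",   # assumed model repo
    model_kwargs={
        "temperature": 0.7,
        "top_p": 0.95,
        "repetition_penalty": 1.1,
    },
    huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
)

# get_provider_default_settings() now returns the CustomLLMSettings pydantic
# model, bundling the structured-output settings with the sampling parameters.
settings = llm.get_provider_default_settings()
print(settings.structured_output.output_type)   # LlmStructuredOutputType.no_structured_output
print(settings.temperature, settings.top_p, settings.repetition_penalty)

# __call__ delegates to the underlying langchain HuggingFaceHub model.
response = llm("What is the capital of France?")

# get_num_tokens() is still a whitespace-split placeholder, so this is only a
# rough estimate of the true token count.
print(llm.get_num_tokens(response))

Note that the whitespace-based get_num_tokens is only an approximation; if WebSearchTool relies on it for context budgeting, a tokenizer-backed count (for example via transformers.AutoTokenizer for the same repo_id) would be more accurate.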