Shreyas094 committed on
Commit
2cb0f1d
·
verified ·
1 Parent(s): fd59f79

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -3
app.py CHANGED
@@ -6,7 +6,7 @@ from typing import List
6
  from datetime import datetime, timezone
7
  from pydantic import BaseModel, Field
8
  from trafilatura import fetch_url, extract
9
- from langchain import HuggingFaceHub
10
 
11
  from llama_cpp_agent import MessagesFormatterType
12
  from llama_cpp_agent.chat_history import BasicChatHistory
@@ -17,7 +17,7 @@ from llama_cpp_agent.llm_output_settings import (
17
  )
18
  from llama_cpp_agent.tools import WebSearchTool
19
  from llama_cpp_agent.prompt_templates import web_search_system_prompt, research_system_prompt
20
- from langchain import HuggingFaceHub
21
  from llama_cpp_agent.llm_output_settings import LlmStructuredOutputSettings, LlmStructuredOutputType
22
  from llama_cpp_agent.llm_output_settings import LlmStructuredOutputType
23
 
@@ -110,15 +110,27 @@ class HuggingFaceHubWrapper:
110
 
111
  def get_provider_default_settings(self):
112
  return LlmStructuredOutputSettings(
113
- output_type=LlmStructuredOutputType.no_structured_output, # Changed to a valid option
114
  include_system_prompt=False,
115
  include_user_prompt=False,
116
  include_assistant_prompt=False,
117
  )
118
 
 
 
 
119
  def __call__(self, *args, **kwargs):
120
  return self.model(*args, **kwargs)
121
 
 
 
 
 
 
 
 
 
 
122
  # Utility functions
123
  def get_server_time():
124
  utc_time = datetime.now(timezone.utc)
 
6
  from datetime import datetime, timezone
7
  from pydantic import BaseModel, Field
8
  from trafilatura import fetch_url, extract
9
+ from langchain_community.llms import HuggingFaceHub
10
 
11
  from llama_cpp_agent import MessagesFormatterType
12
  from llama_cpp_agent.chat_history import BasicChatHistory
 
17
  )
18
  from llama_cpp_agent.tools import WebSearchTool
19
  from llama_cpp_agent.prompt_templates import web_search_system_prompt, research_system_prompt
20
+ from langchain_community.llms import HuggingFaceHub
21
  from llama_cpp_agent.llm_output_settings import LlmStructuredOutputSettings, LlmStructuredOutputType
22
  from llama_cpp_agent.llm_output_settings import LlmStructuredOutputType
23
 
 
110
 
111
  def get_provider_default_settings(self):
112
  return LlmStructuredOutputSettings(
113
+ output_type=LlmStructuredOutputType.no_structured_output,
114
  include_system_prompt=False,
115
  include_user_prompt=False,
116
  include_assistant_prompt=False,
117
  )
118
 
119
+ def get_provider_identifier(self):
120
+ return "HuggingFaceHub"
121
+
122
  def __call__(self, *args, **kwargs):
123
  return self.model(*args, **kwargs)
124
 
125
+ # Add any other methods that might be required by WebSearchTool
126
+ def get_num_tokens(self, text):
127
+ # This is a placeholder. You might need to implement a proper token counting method
128
+ return len(text.split())
129
+
130
+ def get_max_tokens(self):
131
+ # This is a placeholder. Return the actual max tokens for your model
132
+ return 2048
133
+
134
  # Utility functions
135
  def get_server_time():
136
  utc_time = datetime.now(timezone.utc)