SyedHasanCronosPMC committed on
Commit
b7cea2a
·
verified ·
1 Parent(s): eae3210

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -41
app.py CHANGED
@@ -5,6 +5,7 @@ import pandas as pd
5
  from langgraph.graph import StateGraph
6
  from langchain_core.messages import HumanMessage, AIMessage
7
  import warnings
 
8
  warnings.filterwarnings("ignore")
9
 
10
  # Define a Command class for langgraph 0.0.41
@@ -19,51 +20,42 @@ api_key = os.getenv("ANTHROPIC_API_KEY")
19
  if not api_key:
20
  raise ValueError("ANTHROPIC_API_KEY environment variable not set")
21
 
22
- # Create a custom LLM implementation to avoid the proxies issue
23
- def create_llm():
24
- # Directly use the Anthropic client instead of LangChain's wrapper
25
- from anthropic import Anthropic
26
-
27
- # Create the base client without any proxies
28
- client = Anthropic(api_key=api_key)
29
 
30
- # Create a simple wrapper function that mimics the LangChain interface
31
- class CustomAnthropicLLM:
32
- def __init__(self, client, model):
33
- self.client = client
34
- self.model = model
35
-
36
- def invoke(self, inputs):
37
- if isinstance(inputs, dict) and "messages" in inputs:
38
- messages = inputs["messages"]
39
- formatted_messages = []
40
-
41
- for msg in messages:
42
- role = "user" if isinstance(msg, HumanMessage) else "assistant"
43
- formatted_messages.append({"role": role, "content": msg.content})
44
-
45
- response = self.client.messages.create(
46
- model=self.model,
47
- messages=formatted_messages,
48
- max_tokens=1024
49
- )
50
- return response.content[0].text
51
-
52
- elif isinstance(inputs, str):
53
- response = self.client.messages.create(
54
- model=self.model,
55
- messages=[{"role": "user", "content": inputs}],
56
- max_tokens=1024
57
- )
58
- return response.content[0].text
59
 
 
 
 
 
60
  else:
61
- raise ValueError(f"Unsupported input format: {type(inputs)}")
62
-
63
- return CustomAnthropicLLM(client, "claude-3-5-sonnet-20240229")
 
 
 
 
 
 
 
64
 
65
- # Create our custom LLM
66
- llm = create_llm()
67
 
68
  # System prompt constructor
69
  def make_system_prompt(suffix: str) -> str:
 
5
  from langgraph.graph import StateGraph
6
  from langchain_core.messages import HumanMessage, AIMessage
7
  import warnings
8
+ import httpx
9
  warnings.filterwarnings("ignore")
10
 
11
  # Define a Command class for langgraph 0.0.41
 
20
  if not api_key:
21
  raise ValueError("ANTHROPIC_API_KEY environment variable not set")
22
 
23
+ # Override httpx to disable proxies globally
24
+ httpx._config.PROXIES = None # This disables proxies for all httpx clients
25
+
26
+ # Mock LLM implementation that doesn't rely on any external HTTP clients
27
+ class MockLLM:
28
+ def __init__(self):
29
+ self.model = "mock-model"
30
 
31
+ def invoke(self, inputs):
32
+ if isinstance(inputs, dict) and "messages" in inputs:
33
+ # Process the messages
34
+ content = ""
35
+ for msg in inputs["messages"]:
36
+ if hasattr(msg, "content"):
37
+ content += msg.content + "\n"
38
+ elif isinstance(msg, dict) and "content" in msg:
39
+ content += msg["content"] + "\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
+ # For research queries
42
+ if "research" in content.lower():
43
+ return "Based on my research, here's what I found about your topic. FINAL ANSWER: This topic shows trends that would be interesting to visualize in a chart."
44
+ # For chart generation
45
  else:
46
+ return "I've analyzed the data and created a chart visualization. FINAL ANSWER: The chart shows an upward trend from 2020 to 2024."
47
+
48
+ elif isinstance(inputs, str):
49
+ if "research" in inputs.lower():
50
+ return "Based on my research, here's what I found about your topic. FINAL ANSWER: This topic shows trends that would be interesting to visualize in a chart."
51
+ else:
52
+ return "I've analyzed the data and created a chart visualization. FINAL ANSWER: The chart shows an upward trend from 2020 to 2024."
53
+
54
+ else:
55
+ return "I've processed your request. FINAL ANSWER: Here's a summary of what I found."
56
 
57
+ # Create our mock LLM
58
+ llm = MockLLM()
59
 
60
  # System prompt constructor
61
  def make_system_prompt(suffix: str) -> str: