Update app.py
app.py CHANGED
@@ -4,12 +4,14 @@ from langsmith.run_helpers import traceable
 from langsmith_config import setup_langsmith_config
 import base64
 import os
+import uuid

 os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
 model = "gpt-3.5-turbo-1106"
 model_vision = "gpt-4-vision-preview"
 setup_langsmith_config()
-
+# generate UUID for the user from python
+user_id = str(uuid.uuid4())

 def process_images(msg: cl.Message):
     # Processing images exclusively
@@ -58,7 +60,7 @@ def handle_vision_call(msg, image_history):
     image_history.clear()
     return stream

-@traceable(run_type="llm", name="gpt 3 turbo call")
+@traceable(run_type="llm", name="gpt 3 turbo call", metadata={"user": user_id})
 async def gpt_call(message_history: list = []):
     client = OpenAI()

@@ -66,11 +68,11 @@ async def gpt_call(message_history: list = []):
         model=model,
         messages=message_history,
         stream=True,
+        user=user_id,
     )
-
     return stream

-@traceable(run_type="llm", name="gpt 4 turbo vision call")
+@traceable(run_type="llm", name="gpt 4 turbo vision call", metadata={"user": user_id})
 def gpt_vision_call(image_history: list = []):
     client = OpenAI()

@@ -79,6 +81,7 @@ def gpt_vision_call(image_history: list = []):
         messages=image_history,
         max_tokens=300,
         stream=True,
+        user=user_id,
     )

     return stream
@@ -90,9 +93,10 @@ def start_chat():
         [{"role": "system", "content": "You are a helpful assistant."}],
     )
     cl.user_session.set("image_history", [{"role": "system", "content": "You are a helpful assistant."}])
+

 @cl.on_message
-@traceable(run_type="chain", name="message")
+@traceable(run_type="chain", name="message", metadata={"user": user_id})
 async def on_message(msg: cl.Message):
     message_history = cl.user_session.get("message_history")
     image_history = cl.user_session.get("image_history")
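In short, the commit generates one UUID per process at import time and threads it through every LangSmith trace (via the `metadata` argument of `@traceable`) and every OpenAI request (via the `user` field of `chat.completions.create`). The following is a minimal sketch of that pattern, not the full Space: it assumes the `langsmith` and `openai` packages are installed and that OPENAI_API_KEY plus LangSmith credentials are already set in the environment (the Space does the latter through `setup_langsmith_config()`); the streaming loop at the bottom is illustrative only.

# Minimal sketch of the tagging pattern this commit introduces (assumptions
# noted above): one UUID per process, attached to LangSmith runs via
# `metadata` and to OpenAI requests via the `user` field.
import uuid

from langsmith.run_helpers import traceable
from openai import OpenAI

user_id = str(uuid.uuid4())  # shared by every request served by this process
model = "gpt-3.5-turbo-1106"


@traceable(run_type="llm", name="gpt 3 turbo call", metadata={"user": user_id})
def gpt_call(message_history: list):
    client = OpenAI()
    # `user` is sent to OpenAI for per-user attribution; `metadata` makes the
    # same id filterable on the corresponding LangSmith run.
    return client.chat.completions.create(
        model=model,
        messages=message_history,
        stream=True,
        user=user_id,
    )


if __name__ == "__main__":
    stream = gpt_call([{"role": "user", "content": "Say hello."}])
    for chunk in stream:
        print(chunk.choices[0].delta.content or "", end="")

Note the design choice: because `user_id` is created at module import, every Chainlit session served by one process shares the same id. Generating it inside the `@cl.on_chat_start` handler and storing it in `cl.user_session` would yield a per-session id instead.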