Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
-
# app.py (Updated for
|
2 |
import gradio as gr
|
3 |
import openai
|
|
|
4 |
import threading
|
5 |
import time
|
6 |
import numpy as np
|
@@ -9,8 +10,8 @@ import os
|
|
9 |
import pickle
|
10 |
from datetime import datetime
|
11 |
|
12 |
-
#
|
13 |
-
|
14 |
|
15 |
# === CONFIG ===
|
16 |
EMBEDDING_MODEL = "text-embedding-3-small"
|
@@ -21,7 +22,7 @@ INDEX_FILE = "memory.index"
|
|
21 |
# === EMBEDDING UTILS ===
|
22 |
def get_embedding(text, model=EMBEDDING_MODEL):
|
23 |
text = text.replace("\n", " ")
|
24 |
-
response =
|
25 |
return response.data[0].embedding
|
26 |
|
27 |
def cosine_similarity(vec1, vec2):
|
@@ -50,12 +51,16 @@ conversation = []
|
|
50 |
turn_count = 0
|
51 |
auto_mode = False
|
52 |
|
53 |
-
# === CHAT COMPLETION ===
|
54 |
def chat_completion(system, messages, model=CHAT_MODEL):
|
55 |
try:
|
56 |
-
|
|
|
|
|
|
|
|
|
57 |
model=model,
|
58 |
-
messages=
|
59 |
temperature=0.7,
|
60 |
max_tokens=150
|
61 |
)
|
|
|
1 |
+
# app.py (Updated for OpenAI v1.x)
|
2 |
import gradio as gr
|
3 |
import openai
|
4 |
+
from openai import OpenAI # Import the new client
|
5 |
import threading
|
6 |
import time
|
7 |
import numpy as np
|
|
|
10 |
import pickle
|
11 |
from datetime import datetime
|
12 |
|
13 |
+
# Initialize OpenAI client
|
14 |
+
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
|
15 |
|
16 |
# === CONFIG ===
|
17 |
EMBEDDING_MODEL = "text-embedding-3-small"
|
|
|
22 |
# === EMBEDDING UTILS ===
|
23 |
def get_embedding(text, model=EMBEDDING_MODEL):
    """Return the embedding vector for *text* via the OpenAI v1.x client.

    Args:
        text: Input string to embed.
        model: Embedding model name (defaults to the module-level
            EMBEDDING_MODEL constant).

    Returns:
        The embedding as a list of floats (first item of the API response).
    """
    # Newlines can degrade embedding quality, so flatten them to spaces first.
    cleaned = text.replace("\n", " ")
    # `client` is the module-level OpenAI client initialized at import time.
    result = client.embeddings.create(input=[cleaned], model=model)
    return result.data[0].embedding
|
27 |
|
28 |
def cosine_similarity(vec1, vec2):
|
|
|
51 |
turn_count = 0
|
52 |
auto_mode = False
|
53 |
|
54 |
+
# === CHAT COMPLETION (UPDATED FOR OPENAI v1.x) ===
|
55 |
def chat_completion(system, messages, model=CHAT_MODEL):
|
56 |
try:
|
57 |
+
# Build message list with system prompt
|
58 |
+
full_messages = [{"role": "system", "content": system}]
|
59 |
+
full_messages.extend(messages)
|
60 |
+
|
61 |
+
response = client.chat.completions.create(
|
62 |
model=model,
|
63 |
+
messages=full_messages,
|
64 |
temperature=0.7,
|
65 |
max_tokens=150
|
66 |
)
|