Spaces:
Sleeping
Sleeping
Update actions/actions.py
Browse files- actions/actions.py +44 -11
actions/actions.py
CHANGED
@@ -11,7 +11,8 @@ from rasa_sdk.executor import CollectingDispatcher
|
|
11 |
import random
|
12 |
import os
|
13 |
import sys
|
14 |
-
import openai
|
|
|
15 |
|
16 |
# Add "/app/actions" to the sys.path
|
17 |
actions_path = os.path.abspath("/app/actions")
|
@@ -31,6 +32,39 @@ secret_value_0 = os.environ.get("openai")
|
|
31 |
openai.api_key = secret_value_0
|
32 |
# Provide your OpenAI API key
|
33 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
def generate_openai_response(user_queries, model_engine="gpt-3.5-turbo", max_tokens=100, temperature=0.5):
|
35 |
"""Generate a response using the OpenAI API."""
|
36 |
|
@@ -52,16 +86,15 @@ def generate_openai_response(user_queries, model_engine="gpt-3.5-turbo", max_tok
|
|
52 |
if i<max_user_queries_to_include:
|
53 |
messages.append({"role": "user", "content": user_queries[-max_user_queries_to_include+i]})
|
54 |
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
return response.choices[0]['message']['content'].strip()#+"Debug Info: Openai-input_1: "+str(messages_1)+" Openai_input_0"+str( messages_0)+" user_queries: "+str(user_queries)
|
65 |
|
66 |
class GetOpenAIResponse(Action):
|
67 |
|
|
|
11 |
import random
|
12 |
import os
|
13 |
import sys
|
14 |
+
# import openai
|
15 |
+
import cohere
|
16 |
|
17 |
# Add "/app/actions" to the sys.path
|
18 |
actions_path = os.path.abspath("/app/actions")
|
|
|
32 |
openai.api_key = secret_value_0
|
33 |
# Provide your OpenAI API key  # NOTE(review): the new version comments out `import openai` but keeps this `openai.api_key` assignment — it will raise NameError; remove it or keep it gated on the openai import
|
34 |
|
35 |
+
# def generate_openai_response(user_queries, model_engine="gpt-3.5-turbo", max_tokens=100, temperature=0.5):
|
36 |
+
# """Generate a response using the OpenAI API."""
|
37 |
+
|
38 |
+
# # Send last two user queries for vector search
|
39 |
+
# if len(user_queries) >= 2:
|
40 |
+
# results = main_search(user_queries[-1]+user_queries[-2])
|
41 |
+
# else:
|
42 |
+
# results = main_search(user_queries[-1])
|
43 |
+
|
44 |
+
# # Create context from the results
|
45 |
+
# context = "".join([f"#{str(i)}" for i in results])[:2014] # Trim the context to 2014 characters - Modify as necessary
|
46 |
+
|
47 |
+
# messages=[
|
48 |
+
# {"role": "system", "content": f"You are a helpful assistant tasked to answer user queries using the following context: {context}"}
|
49 |
+
# ]
|
50 |
+
# max_user_queries_to_include = min(1,len(user_queries))
|
51 |
+
# # The latest query is at the end of the list
|
52 |
+
# for i in range(len(user_queries)):
|
53 |
+
# if i<max_user_queries_to_include:
|
54 |
+
# messages.append({"role": "user", "content": user_queries[-max_user_queries_to_include+i]})
|
55 |
+
|
56 |
+
# response = openai.ChatCompletion.create(
|
57 |
+
# model="gpt-3.5-turbo",
|
58 |
+
# messages= messages,
|
59 |
+
# max_tokens=124,
|
60 |
+
# temperature=0,
|
61 |
+
# top_p=1,
|
62 |
+
# frequency_penalty=0,
|
63 |
+
# presence_penalty=0
|
64 |
+
# )
|
65 |
+
# return response.choices[0]['message']['content'].strip()#+"Debug Info: Openai-input_1: "+str(messages_1)+" Openai_input_0"+str( messages_0)+" user_queries: "+str(user_queries)
|
66 |
+
|
67 |
+
|
68 |
def generate_openai_response(user_queries, model_engine="gpt-3.5-turbo", max_tokens=100, temperature=0.5):
|
69 |
"""Generate a response using the OpenAI API."""
|
70 |
|
|
|
86 |
if i<max_user_queries_to_include:
|
87 |
messages.append({"role": "user", "content": user_queries[-max_user_queries_to_include+i]})
|
88 |
|
89 |
+
|
90 |
+
co = cohere.Client(secret_value_0)
|
91 |
+
|
92 |
+
response = co.generate(
|
93 |
+
prompt=messages,
|
94 |
+
)
|
95 |
+
print(response)
|
96 |
+
return response[0].text.strip()#+"Debug Info:
|
97 |
+
|
|
|
98 |
|
99 |
class GetOpenAIResponse(Action):
|
100 |
|