Update app.py
app.py CHANGED

@@ -2,9 +2,11 @@ import os
 
 import gradio as gr
 import openai
+import google.generativeai as palm
 
 #from dotenv import load_dotenv
 
+
 #load_dotenv()
 
 llm_api_options = ["OpenAI API","Azure OpenAI API","Google PaLM API", "Llama 2"]
@@ -13,8 +15,12 @@ openai_models = ["gpt-4", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-
                  "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "text-davinci-003",
                  "text-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]
 
+google_palm_models = ["models/text-bison-001"]
+
 azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
 azure_deployment_name = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME")
+google_palm_key = os.getenv("GOOGLE_PALM_AI_API_KEY")
+
 
 def openai_text_completion(prompt: str, model: str):
     try:
@@ -74,16 +80,46 @@ def azure_openai_text_completion(prompt: str, model: str):
         return f" {optionSelection} test_handler Error - {exception}", ""
 
 
-def
+def palm_text_completion(prompt: str, model: str):
+    try:
+        temperature = 0.7
+        candidate_count = 1
+        top_k = 40
+        top_p = 0.95
+        max_output_tokens = 1024
+        palm.configure(api_key=google_palm_key)
+        defaults = {
+            'model': model,
+            'temperature': temperature,
+            'candidate_count': candidate_count,
+            'top_k': top_k,
+            'top_p': top_p,
+            'max_output_tokens': max_output_tokens,
+            'stop_sequences': [],
+            'safety_settings': [{"category":"HARM_CATEGORY_DEROGATORY","threshold":1},{"category":"HARM_CATEGORY_TOXICITY","threshold":1},{"category":"HARM_CATEGORY_VIOLENCE","threshold":2},{"category":"HARM_CATEGORY_SEXUAL","threshold":2},{"category":"HARM_CATEGORY_MEDICAL","threshold":2},{"category":"HARM_CATEGORY_DANGEROUS","threshold":2}],
+        }
+
+        response = palm.generate_text(
+            **defaults,
+            prompt=prompt
+        )
+        return "", response.result
+    except openai.error.ServiceUnavailableError:
+        print(f"Exception Name: {type(exception).__name__}")
+        print(exception)
+        return f" {optionSelection} test_handler Error - {exception}", ""
+
+def test_handler(optionSelection, prompt: str = TEST_MESSAGE, openai_model_name: str ="gpt-4", google_model_name: str ="models/text-bison-001"):
     match optionSelection:
         case "OpenAI API":
-            message, response = openai_text_completion(prompt,
+            message, response = openai_text_completion(prompt,openai_model_name)
             return message, response
         case "Azure OpenAI API":
-            message, response = azure_openai_text_completion(prompt,
+            message, response = azure_openai_text_completion(prompt,openai_model_name)
             return message, response
         case "Google PaLM API":
-
+            message, response = palm_text_completion(prompt,google_model_name)
+            return message, response
         case "Llama 2":
             return "", ""
         case _:
@@ -97,7 +133,9 @@ with gr.Blocks() as LLMDemoTabbedScreen:
         llm_options = gr.Radio(llm_api_options, label="Select one", info="Which service do you want to use?", value="OpenAI API")
         with gr.Tab("Open AI"):
            openai_model = gr.Dropdown(openai_models, value="gpt-4", label="Model", info="Select one, for Natural language")
-
+        with gr.Tab("Google PaLM API"):
+            google_model_name = gr.Dropdown(google_palm_models,
+                                            value="models/text-bison-001", label="Model", info="Select one, for Natural language")
         with gr.Row():
             with gr.Column():
                 test_string = gr.Textbox(label="Try String", value=TEST_MESSAGE, lines=2)
@@ -108,7 +146,7 @@ with gr.Blocks() as LLMDemoTabbedScreen:
 
     test_button.click(
        fn=test_handler,
-        inputs=[llm_options, test_string, openai_model],
+        inputs=[llm_options, test_string, openai_model, google_model_name],
        outputs=[test_string_output_info, test_string_response]
    )
 
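For context, here is a minimal standalone sketch of the PaLM text-completion path this commit adds. It is not the committed code: it assumes a google-generativeai version that still exposes the legacy palm.generate_text interface and reuses the GOOGLE_PALM_AI_API_KEY environment variable read above, with sampling values mirroring the committed defaults dict. Unlike the committed handler, the sketch catches Exception broadly and binds it with "as exception", because the committed "except openai.error.ServiceUnavailableError:" block formats exception and optionSelection without defining either name in that scope.

import os
import google.generativeai as palm

# Configure the PaLM client from the same environment variable the commit reads.
palm.configure(api_key=os.getenv("GOOGLE_PALM_AI_API_KEY"))

def palm_text_completion(prompt: str, model: str = "models/text-bison-001"):
    try:
        # Same sampling settings as the committed defaults dict.
        response = palm.generate_text(
            model=model,
            prompt=prompt,
            temperature=0.7,
            candidate_count=1,
            top_k=40,
            top_p=0.95,
            max_output_tokens=1024,
        )
        return "", response.result
    except Exception as exception:
        # Binding the exception here avoids the NameError the committed handler
        # would hit when it formats an unbound `exception` name.
        print(f"Exception Name: {type(exception).__name__}")
        print(exception)
        return f"Google PaLM API Error - {exception}", ""

if __name__ == "__main__":
    message, response = palm_text_completion("Write a one-line greeting.")
    print(message or response)

Note also that Gradio passes click inputs to the handler positionally, so the order in inputs=[llm_options, test_string, openai_model, google_model_name] has to match the test_handler(optionSelection, prompt, openai_model_name, google_model_name) signature, which this commit keeps aligned.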
|