Spaces: Runtime error
Commit f7a02b6 · 1 Parent(s): d863fc7
Update app.py

app.py CHANGED
@@ -1,12 +1,13 @@
 import re
 import openai
+import transformers
 import gradio as gr
 
 # Define a regular expression to match Python code blocks
 code_pattern = re.compile(r"```python\n(.*?)\n```", re.DOTALL)
 
-# Define the chat function
-def chat(api_key, model, message):
+# Define the chat function for OpenAI API
+def openai_chat(api_key, model, message):
     # Check if an API key has been provided
     if api_key is None:
         return "Please enter your OpenAI API key and try again."
@@ -18,7 +19,7 @@ def chat(api_key, model, message):
     highlighted_message = message
     for code in code_blocks:
         highlighted_code = f'<span style="background-color: #FFFF00;">{code}</span>'
-        highlighted_message =
+        highlighted_message = re.sub(f'```python\n{code}\n```', highlighted_code, highlighted_message, flags=re.IGNORECASE)
 
     # Set up the OpenAI API request
     response = openai.Completion.create(
@@ -38,10 +39,33 @@ def chat(api_key, model, message):
     highlighted_bot_response = bot_response
     for code in code_blocks:
         highlighted_code = f'<span style="background-color: #FFFF00;">{code}</span>'
-        highlighted_bot_response =
+        highlighted_bot_response = re.sub(f'{code}', highlighted_code, highlighted_bot_response, flags=re.IGNORECASE)
 
     return highlighted_bot_response
 
+# Define the chat function for Hugging Face API
+def hf_chat(model_name, message):
+    # Load the model
+    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
+    model = transformers.AutoModelForCausalLM.from_pretrained(model_name)
+
+    # Encode the message using the model's tokenizer
+    input_ids = tokenizer.encode(message, return_tensors="pt")
+
+    # Generate a response from the model
+    output = model.generate(
+        input_ids=input_ids,
+        max_length=1024,
+        do_sample=True,
+        top_p=0.9,
+        top_k=0,
+    )
+
+    # Decode the response
+    bot_response = tokenizer.decode(output[0], skip_special_tokens=True)
+
+    return bot_response
+
 # Define a function to extract code blocks from a string
 def extract_code_blocks(text):
     code_blocks = []
@@ -52,19 +76,38 @@ def extract_code_blocks(text):
 # Define the Gradio interface
 api_key_input = gr.inputs.Textbox(label="OpenAI API Key", default=None)
 model_input = gr.inputs.Dropdown(
-    label="Select model",
-    choices=["
-    default="
+    label="Select OpenAI model",
+    choices=["davinci", "davinci-002", "davinci-003"],
+    default="davinci-003",
 )
 message_input = gr.inputs.Textbox(label="Enter your message here")
 output = gr.outputs.HTML(label="Bot response")
 
-chat_button = gr.Interface(
-    fn=
+openai_chat_button = gr.Interface(
+    fn=openai_chat,
     inputs=[api_key_input, model_input, message_input],
     outputs=output,
     title="OpenAI Chatbot",
-    description="Enter your message below to chat with an AI",
+    description="Enter your message below to chat with an OpenAI AI",
+    theme="compact",
+    layout="vertical",
+    allow_flagging=False,
+    allow_screenshot=False,
+    allow_share=False,
+)
+
+hf_chat_models = ["microsoft/DialoGPT-large", "microsoft/DialoGPT-medium", "microsoft/DialoGPT-small"]
+hf_model_input = gr.inputs.Dropdown(
+    label="Select Hugging Face model",
+    choices=hf_chat_models,
+    default=hf_chat_models[0],
+)
+hf_chat_button = gr.Interface(
+    fn=hf_chat,
+    inputs=[hf_model_input, message_input],
+    outputs=output,
+    title="Hugging Face Chatbot",
+    description="Enter your message below to chat with a Hugging Face AI",
     theme="compact",
     layout="vertical",
     allow_flagging=False,
@@ -72,5 +115,45 @@ chat_button = gr.Interface(
     allow_share=False,
 )
 
-
-
+chat_button = gr.Interface(
+    inputs="text",
+    outputs=["text", "text"],
+    layout="vertical",
+    title="Chatbot",
+    description="Enter your message below to chat with an AI",
+    theme="compact",
+    allow_flagging=False,
+    allow_screenshot=False,
+    allow_share=False,
+    examples=[
+        ["Hello, how are you?", ""],
+        ["What's the weather like today?", ""],
+        ["Can you help me with some Python code?", "```python\nfor i in range(10):\n print(i)\n```"],
+    ],
+)
+
+chat_button.set_config(gr.InterfaceConfig(
+    dependencies=["transformers"],
+    inputs=[gr.InterfaceComponent(model_input, label="OpenAI Model", visible=False)],
+    outputs=[gr.InterfaceComponent(output, label="Bot Response"), gr.InterfaceComponent(api_key_input, label="OpenAI API Key", visible=False)],
+    layout="vertical",
+    title="OpenAI Chatbot (API Key Required)",
+))
+
+chat_button.set_config(gr.InterfaceConfig(
+    dependencies=["transformers"],
+    inputs=[gr.InterfaceComponent(hf_model_input, label="Hugging Face Model", visible=False)],
+    outputs=[gr.InterfaceComponent(output, label="Bot Response")],
+    layout="vertical",
+    title="Hugging Face Chatbot",
+))
+
+chat_button.set_config(gr.InterfaceConfig(
+    dependencies=["transformers"],
+    inputs=[gr.InterfaceComponent(model_input, label="OpenAI Model"), gr.InterfaceComponent(hf_model_input, label="Hugging Face Model", visible=False)],
+    outputs=[gr.InterfaceComponent(output, label="Bot Response"), gr.InterfaceComponent(api_key_input, label="OpenAI API Key")],
+    layout="vertical",
+    title="Chatbot",
+))
+
+chat_button.test_launch() # Launch the Gradio interface
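
A note on the two highlighting lines added above: they interpolate raw code text straight into the re.sub pattern (and replacement), so code containing regex metacharacters such as (, *, or \ can raise re.error or highlight the wrong span, and backslashes in the replacement can be read as group references. A minimal sketch of a safer variant using plain string replacement; the helper name highlight_code_blocks is illustrative and not part of this commit:

import re

code_pattern = re.compile(r"```python\n(.*?)\n```", re.DOTALL)

def highlight_code_blocks(text):
    # Wrap each fenced Python block in a highlight span without feeding
    # the code text back through the regex engine.
    highlighted = text
    for code in code_pattern.findall(text):
        block = f"```python\n{code}\n```"
        span = f'<span style="background-color: #FFFF00;">{code}</span>'
        highlighted = highlighted.replace(block, span)  # literal replacement, no escaping needed
    return highlighted

If a regex really is wanted (for example to keep the re.IGNORECASE behaviour), re.escape(code) should be used when building the pattern.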
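
The new hf_chat function downloads and instantiates the tokenizer and model on every request, which makes each chat turn very slow on a Space. A minimal sketch of one way to cache the loaded objects between calls, assuming the same transformers models; the _load_hf_model helper is illustrative, not part of the commit:

import functools
import transformers

@functools.lru_cache(maxsize=3)
def _load_hf_model(model_name):
    # Load the tokenizer and model once per model name and reuse them afterwards.
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
    model = transformers.AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model

def hf_chat(model_name, message):
    tokenizer, model = _load_hf_model(model_name)
    input_ids = tokenizer.encode(message, return_tensors="pt")
    output = model.generate(
        input_ids=input_ids,
        max_length=1024,
        do_sample=True,
        top_p=0.9,
        top_k=0,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)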
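
The trailing chat_button block builds a third gr.Interface without an fn argument and then calls set_config with gr.InterfaceConfig and gr.InterfaceComponent, names I cannot match to Gradio's public API, which would be consistent with the Space's "Runtime error" status. If the intent is to expose both chatbots from a single app, one plausible alternative is gr.TabbedInterface from newer Gradio releases; this is a sketch only, reusing the openai_chat_button and hf_chat_button interfaces defined in the diff:

import gradio as gr

# Put the OpenAI and Hugging Face interfaces on separate tabs of one demo.
demo = gr.TabbedInterface(
    [openai_chat_button, hf_chat_button],
    ["OpenAI Chatbot", "Hugging Face Chatbot"],
)

demo.launch()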