robert committed on
Commit · 90af0e7 · 1 Parent(s): ba33077
Shipping spaces model
Browse files
- app.py +169 -12
- askbakingtop.json +0 -0
app.py CHANGED
@@ -1,22 +1,179 @@
-import
-import
…
        )
-    except subprocess.CalledProcessError as e:
-        print(f"Failed to install packages from {requirements_file}: {e}")
…
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()
+import json
+import os
+import random
+from threading import Thread
+
+import gradio as gr
+import spaces
+import torch
+from langchain.schema import AIMessage, HumanMessage
+from langchain_openai import ChatOpenAI
+from pydantic import BaseModel, SecretStr
+from transformers import (
+    AutoModelForCausalLM,
+    AutoTokenizer,
+    StoppingCriteria,
+    StoppingCriteriaList,
+    TextIteratorStreamer,
+)
+
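+# Load the preference-optimized model and its tokenizer once at startup and move the model to the GPU.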
+tokenizer = AutoTokenizer.from_pretrained("ContextualAI/archangel_sft-kto_llama30b")
+model = AutoModelForCausalLM.from_pretrained("ContextualAI/archangel_sft-kto_llama30b")
+model = model.to("cuda:0")
+
+
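+# Pydantic wrapper so the user's OpenAI API key is handled as a SecretStr rather than a plain string.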
+class OAAPIKey(BaseModel):
+    openai_api_key: SecretStr
+
+
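+# Expose the key to the OpenAI client via the environment and return a ChatOpenAI handle.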
+def set_openai_api_key(api_key: SecretStr):
+    os.environ["OPENAI_API_KEY"] = api_key.get_secret_value()
+    llm = ChatOpenAI(temperature=1.0, model="gpt-3.5-turbo-0125")
+    return llm


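+# Stopping criterion: end generation once the most recent tokens exactly match a given string's token sequence.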
+class StopOnSequence(StoppingCriteria):
+    def __init__(self, sequence, tokenizer):
+        self.sequence_ids = tokenizer.encode(sequence, add_special_tokens=False)
+        self.sequence_len = len(self.sequence_ids)
+
+    def __call__(
+        self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
+    ) -> bool:
+        if input_ids.shape[1] < self.sequence_len:
+            return False
+        return (
+            (
+                input_ids[0, -self.sequence_len:]
+                == torch.tensor(self.sequence_ids, device=input_ids.device)
+            )
+            .all()
+            .item()
        )


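+# ZeroGPU hook: a GPU is attached for up to 120 seconds while this function runs.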
+@spaces.GPU(duration=120)
+def spaces_model_predict(message: str, history: list[tuple[str, str]]):
+    history_transformer_format = history + [[message, ""]]
+    stop = StopOnSequence("<|human|>", tokenizer)
+
+    messages = "".join(
+        [
+            "".join(["\n<human>:" + item[0], "\n<ai>:" + item[1]])
+            for item in history_transformer_format
+        ]
+    )
+
+    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
+    streamer = TextIteratorStreamer(
+        tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+    )
+    generate_kwargs = dict(
+        model_inputs,
+        streamer=streamer,
+        max_new_tokens=512,
+        do_sample=True,
+        top_p=0.95,
+        top_k=1000,
+        temperature=1.0,
+        num_beams=1,
+        stopping_criteria=StoppingCriteriaList([stop]),
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
+
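+    # Drain the streamer on this thread, skipping the "<" token that opens the stop sequence.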
+    partial_message = ""
+    for new_token in streamer:
+        if new_token != "<":
+            partial_message += new_token
+    return partial_message
+
+
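+# Send the same user message to both chatbots and append each answer to its own history.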
+def predict(
+    message: str,
+    chat_history_openai: list[tuple[str, str]],
+    chat_history_spaces: list[tuple[str, str]],
+    openai_api_key: SecretStr,
+):
+    openai_key_model = OAAPIKey(openai_api_key=openai_api_key)
+    openai_llm = set_openai_api_key(api_key=openai_key_model.openai_api_key)

+    # OpenAI
+    history_langchain_format_openai = []
+    for human, ai in chat_history_openai:
+        history_langchain_format_openai.append(HumanMessage(content=human))
+        history_langchain_format_openai.append(AIMessage(content=ai))
+    history_langchain_format_openai.append(HumanMessage(content=message))

+    openai_response = openai_llm.invoke(input=history_langchain_format_openai)
+
+    # Spaces Model
+    spaces_model_response = spaces_model_predict(message, chat_history_spaces)
+
+    chat_history_openai.append((message, openai_response.content))
+    chat_history_spaces.append((message, spaces_model_response))
+    return "", chat_history_openai, chat_history_spaces
+
+
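+# Sample questions that seed the message dropdown below.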
+with open("askbakingtop.json", "r") as file:
+    ask_baking_msgs = json.load(file)
+
+
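+# UI: an API-key box, a dropdown of example questions, and the two chatbots side by side.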
+with gr.Blocks() as demo:
+    with gr.Row():
+        with gr.Column(scale=1):
+            openai_api_key = gr.Textbox(
+                label="Please enter your OpenAI API key",
+                type="password",
+                elem_id="lets-chat-openai-api-key",
+            )
+
+    with gr.Row():
+        options = [ask["history"] for ask in random.sample(ask_baking_msgs, k=3)]
+        msg = gr.Dropdown(
+            options,
+            label="Please enter your message",
+            interactive=True,
+            multiselect=False,
+            allow_custom_value=True,
+        )
+
+    with gr.Row():
+        with gr.Column(scale=1):
+            chatbot_openai = gr.Chatbot(label="OpenAI Chatbot 🏢")
+        with gr.Column(scale=1):
+            chatbot_spaces = gr.Chatbot(
+                label="Your own fine-tuned preference optimized Chatbot 💪"
+            )
+
+    with gr.Row():
+        submit_button = gr.Button("Submit")
+
+    with gr.Row():
+        clear = gr.ClearButton([msg])
+
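+    # Callback: thin wrapper that forwards the UI inputs to predict().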
+    def respond(
+        message: str,
+        chat_history_openai: list[tuple[str, str]],
+        chat_history_spaces: list[tuple[str, str]],
+        openai_api_key: SecretStr,
+    ):
+        return predict(
+            message=message,
+            chat_history_openai=chat_history_openai,
+            chat_history_spaces=chat_history_spaces,
+            openai_api_key=openai_api_key,
+        )

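+    # Submit clears the message box (predict returns "") and refreshes both chat windows.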
+    submit_button.click(
+        fn=respond,
+        inputs=[
+            msg,
+            chatbot_openai,
+            chatbot_spaces,
+            openai_api_key,
+        ],
+        outputs=[msg, chatbot_openai, chatbot_spaces],
+    )

demo.launch()
askbakingtop.json ADDED
File without changes