Alex committed · Commit 7f04bf9
1 Parent(s): 6c819be
Rename app to app.py
app DELETED
@@ -1,7 +0,0 @@
-import gradio as gr
-
-def greet(name):
-    return "Hello " + name + "!!"
-
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
app.py ADDED
@@ -0,0 +1,43 @@
+import streamlit as st
+from streamlit_chat import message
+from streamlit_extras.colored_header import colored_header
+from streamlit_extras.add_vertical_space import add_vertical_space
+from hugchat import hugchat
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+tokenizer = AutoTokenizer.from_pretrained("Celestinian/PromptGPT")
+model = AutoModelForCausalLM.from_pretrained("Celestinian/PromptGPT").to(device)
+
+st.set_page_config(page_title="EinfachChat")
+
+# Sidebar contents
+with st.sidebar:
+    st.title('EinfachChat')
+    max_length = st.slider('Max Length', min_value=10, max_value=100, value=30)
+    do_sample = st.checkbox('Do Sample', value=True)
+    temperature = st.slider('Temperature', min_value=0.1, max_value=1.0, value=0.4)
+    no_repeat_ngram_size = st.slider('No Repeat N-Gram Size', min_value=1, max_value=10, value=1)
+    top_k = st.slider('Top K', min_value=1, max_value=100, value=50)
+    top_p = st.slider('Top P', min_value=0.1, max_value=1.0, value=0.2)
+
+# Rest of your original Streamlit code ...
+
+def generate_text(prompt, max_length, do_sample, temperature, no_repeat_ngram_size, top_k, top_p):
+    formatted_prompt = "\n" + prompt
+    if ',' not in prompt:
+        formatted_prompt += ','
+    prompt = tokenizer(formatted_prompt, return_tensors='pt')
+    prompt = {key: value.to(device) for key, value in prompt.items()}
+    out = model.generate(**prompt, max_length=max_length, do_sample=do_sample, temperature=temperature,
+                         no_repeat_ngram_size=no_repeat_ngram_size, top_k=top_k, top_p=top_p)
+    output = tokenizer.decode(out[0])
+    clean_output = output.replace('\n', '\n')
+    return clean_output
+
+# Inside the conditional display part, replace
+# response = generate_response(user_input)
+# with
+response = generate_text(user_input, max_length, do_sample, temperature, no_repeat_ngram_size, top_k, top_p)
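
The closing comments refer to a "conditional display part" that is not included in this commit. The following is a minimal sketch of what such a chat loop at the bottom of app.py might look like, assuming streamlit_chat-style rendering; the widget label, the user_input variable, and the session-state keys 'past' and 'generated' are assumptions for illustration, not part of this diff.

# Hypothetical sketch of the "conditional display part" referenced above;
# widget labels and session-state keys are assumptions, not from this commit.
if 'past' not in st.session_state:
    st.session_state['past'] = []        # user messages
if 'generated' not in st.session_state:
    st.session_state['generated'] = []   # model replies

user_input = st.text_input('You:', key='input')

if user_input:
    # This call replaces the old generate_response(user_input), as the comments above describe
    response = generate_text(user_input, max_length, do_sample, temperature,
                             no_repeat_ngram_size, top_k, top_p)
    st.session_state['past'].append(user_input)
    st.session_state['generated'].append(response)

if st.session_state['generated']:
    for i in range(len(st.session_state['generated'])):
        message(st.session_state['past'][i], is_user=True, key=f'{i}_user')
        message(st.session_state['generated'][i], key=str(i))

Note that max_length in transformers' generate() caps the combined length of prompt and completion; max_new_tokens is the alternative if only the generated portion should be limited.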