# Streamlit demo app: GPT-JT text completion served over the Together network.
import streamlit as st
import requests
import asyncio
import time
from ast import literal_eval
import urllib.parse
from dacite import from_dict
from together_web3.computer import LanguageModelInferenceRequest
from together_web3.together import TogetherWeb3
# Page title plus one-time per-session setup.
st.title("GPT-JT")

# Streamlit reruns the whole script on every interaction; stash the Together
# network client and a dedicated asyncio event loop in session state so they
# are created only once per browser session.
if "together_web3" not in st.session_state:
    st.session_state.together_web3 = TogetherWeb3()
if "loop" not in st.session_state:
    st.session_state.loop = asyncio.new_event_loop()
async def _inference(prompt, max_tokens):
    """Await a completion for *prompt* from the Together network.

    Builds a ``LanguageModelInferenceRequest`` for the GPT-JT model and
    submits it through the shared client kept in Streamlit session state.
    Returns the raw inference response object.
    """
    request = from_dict(
        data_class=LanguageModelInferenceRequest,
        data={
            "model": "Together-gpt-JT-6B-v1",
            "max_tokens": max_tokens,
            "prompt": prompt,
        },
    )
    return await st.session_state.together_web3.language_model_inference(request)
def infer(prompt,
          model_name,
          max_new_tokens=10,
          temperature=0.0,
          top_p=1.0,
          num_completions=1,
          seed=42,
          stop="\n"):
    """Run a blocking inference call and return the first completion's text.

    NOTE(review): only ``prompt`` and ``max_new_tokens`` reach the backend;
    ``model_name``, ``temperature``, ``top_p``, ``num_completions``, ``seed``
    and ``stop`` are accepted but currently ignored — confirm whether the
    request schema supports them before forwarding.
    """
    print("prompt", prompt)
    # Drive the async request to completion on the session's private loop.
    pending = _inference(prompt, int(max_new_tokens))
    response = st.session_state.loop.run_until_complete(pending)
    print(response)
    return response.choices[0].text
# Two-column layout: generation settings on the left (1 part), prompt and
# output on the right (3 parts).
col1, col2 = st.columns([1, 3])

with col1:
    model_name = st.selectbox("Model", ["GPT-JT-6B-v1"])
    # All numeric settings are collected as raw strings; infer() converts
    # what it actually uses (currently just max_new_tokens).
    max_new_tokens = st.text_input("Max new tokens", "10")
    temperature = st.text_input("temperature", "0.0")
    top_p = st.text_input("top_p", "1.0")
    num_completions = st.text_input("num_completions (only the best one will be returend)", "1")
    # Default shows a literal backslash-n; literal_eval decodes it on submit.
    stop = st.text_input("stop, split by;", r"\n")
    seed = st.text_input("seed", "42")
with col2:
    s_example = "Please answer the following question:\n\nQuestion: Where is Zurich?\nAnswer:"
    prompt = st.text_area(
        "Prompt",
        value=s_example,
        max_chars=4096,
        height=400,
    )
    # Placeholder that is overwritten with the prompt + completion later.
    generated_area = st.empty()
    generated_area.text("(Generate here)")
    button_submit = st.button("Submit")

if button_submit:
    # Echo the prompt right away so the user sees immediate feedback,
    # then replace it with prompt + completion once inference finishes.
    generated_area.text(prompt)
    report_text = infer(
        prompt,
        model_name=model_name,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        num_completions=num_completions,
        seed=seed,
        # Triple-quote wrapping lets literal_eval decode escapes like "\n"
        # typed into the stop field without executing arbitrary code.
        stop=literal_eval("'''" + stop + "'''"),
    )
    generated_area.text(prompt + report_text)