from threading import Thread
import gradio as gr
import inspect
from gradio import routes
from typing import List, Type
from petals import AutoDistributedModelForCausalLM
from transformers import AutoTokenizer
import requests, os, re, asyncio, json

loop = asyncio.get_event_loop()
# init code
def get_types(cls_set: List[Type], component: str):
    """Parse each Gradio component class docstring to recover its description and type name."""
    docset = []
    types = []
    if component == "input":
        for cls in cls_set:
            doc = inspect.getdoc(cls)
            doc_lines = doc.split("\n")
            docset.append(doc_lines[1].split(":")[-1])
            types.append(doc_lines[1].split(")")[0].split("(")[-1])
    else:
        for cls in cls_set:
            doc = inspect.getdoc(cls)
            doc_lines = doc.split("\n")
            docset.append(doc_lines[-1].split(":")[-1])
            types.append(doc_lines[-1].split(")")[0].split("(")[-1])
    return docset, types

routes.get_types = get_types
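# Note: overriding routes.get_types like this is a known workaround for older
# Gradio releases, where building the auto-generated API docs page crashes on
# components whose docstrings don't match the "(type)" pattern the default
# parser expects.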
# App code
model_name = "petals-team/StableBeluga2"
# Alternative models:
#   daekeun-ml/Llama-2-ko-instruct-13B
#   quantumaikr/llama-2-70b-fb16-korean

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = None

# Per-NPC, per-user chat histories: history[npc][user_id] -> dialogue string
history = {"": {}}
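# npc_story maps each NPC name to its character description. It is referenced
# below but not defined in this file (presumably it lives elsewhere in the
# Space); a minimal placeholder so the module parses standalone:
npc_story = {}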
def check(model_name):
    """Return True if the Petals swarm currently reports this model as healthy."""
    data = requests.get("https://health.petals.dev/api/v1/state").json()
    for d in data["model_reports"]:
        if d["name"] == model_name and d["state"] == "healthy":
            return True
    return False
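# The health endpoint returns JSON shaped roughly like (fields abridged,
# inferred from the usage above):
#   {"model_reports": [{"name": "petals-team/StableBeluga2",
#                       "state": "healthy", ...}, ...]}
# so a quick sanity check might look like:
#   >>> check("petals-team/StableBeluga2")
#   True   # when enough Petals peers are serving the model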
def init():
    global model
    if check(model_name):
        model = AutoDistributedModelForCausalLM.from_pretrained(model_name)
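# from_pretrained here joins the public Petals swarm, so loading can take a
# while; init() is therefore called lazily from chat() rather than at import
# time, and the first caller simply gets "no model" back while it loads.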
def chat(id, npc, text):
    if model is None:
        init()
        return "no model"

    # get_coin endpoint: ask the Unity-side API how many coins this user has
    response = requests.post("https://ldhldh-api-for-unity.hf.space/run/predict_6", json={
        "data": [
            id,
        ]}).json()
    coin = response["data"][0]
    if int(coin) == 0:
        return "no coin"
    # model inference
    if check(model_name):
        global history
        if npc not in npc_story:
            return "no npc"
        if npc not in history:
            history[npc] = {}
        if id not in history[npc]:
            history[npc][id] = ""
        # Cap the stored dialogue: once more than 10 "###"-delimited chunks
        # accumulate, drop the oldest three and keep the rest.
        if len(history[npc][id].split("###")) > 10:
            history[npc][id] = "###" + history[npc][id].split("###", 3)[3]
        npc_list = str([k for k in npc_story.keys()]).replace('\'', '')
        town_story = f"""[{id}์ ๋ง์]
์ธ๋ด ๊ณณ์ ์กฐ๊ทธ๋ง ์ฌ์ ์ฌ๋ฌ ์ฃผ๋ฏผ๋ค์ด ๋ชจ์ฌ ์ด๊ณ ์์ต๋๋ค.
ํ์ฌ {npc_list}์ด ์ด๊ณ ์์ต๋๋ค."""
        system_message = f"""1. ๋น์ ์ ํ๊ตญ์ด์ ๋ฅ์ํฉ๋๋ค.
2. ๋น์ ์ ์ง๊ธ ์ญํ ๊ทน์ ํ๊ณ ์์ต๋๋ค. {npc}์ ๋ฐ์์ ์์ํ๊ณ ๋งค๋ ฅ์ ์ด๊ฒ ํํํฉ๋๋ค.
3. ๋น์ ์ {npc}์ ๋๋ค. {npc}์ ์ ์ฅ์์ ์๊ฐํ๊ณ ๋งํฉ๋๋ค.
4. ์ฃผ์ด์ง๋ ์ ๋ณด๋ฅผ ๋ฐํ์ผ๋ก ๊ฐ์ฐ์ฑ์๊ณ ์ค๊ฐ๋๋ {npc}์ ๋์ฌ๋ฅผ ์์ฑํ์ธ์.
5. ์ฃผ์ด์ง๋ {npc}์ ์ ๋ณด๋ฅผ ์ ์คํ๊ฒ ์ฝ๊ณ , ๊ณผํ์ง ์๊ณ ๋ด๋ฐฑํ๊ฒ ์บ๋ฆญํฐ๋ฅผ ์ฐ๊ธฐํ์ธ์.
6. User์ ์ญํ ์ ์ ๋๋ก ์นจ๋ฒํ์ง ๋ง์ธ์. ๊ฐ์ ๋ง์ ๋ฐ๋ณตํ์ง ๋ง์ธ์.
7. {npc}์ ๋งํฌ๋ฅผ ์ง์ผ์ ์์ฑํ์ธ์."""
        prom = f"""<<SYS>>
{system_message}<</SYS>>
{town_story}
### ์บ๋ฆญํฐ ์ ๋ณด: {npc_story[npc]}
### ๋ช ๋ น์ด:
{npc}์ ์ ๋ณด๋ฅผ ์ฐธ๊ณ ํ์ฌ {npc}์ด ํ ๋ง์ ์ํฉ์ ๋ง์ถฐ ์์ฐ์ค๋ฝ๊ฒ ์์ฑํด์ฃผ์ธ์.
{history[npc][id]}
### User:
{text}
### {npc}:
"""
        inputs = tokenizer(prom, return_tensors="pt")["input_ids"]
        outputs = model.generate(inputs, do_sample=True, temperature=0.6, top_p=0.75, max_new_tokens=100)
        # Slice the decoded text past the echoed prompt (the +3 offset appears
        # to account for the decoded special-token prefix) and cut at the first
        # "<" or "###" so only the NPC's own line survives.
        output = tokenizer.decode(outputs[0])[len(prom)+3:-1].split("<")[0].split("###")[0].replace(". ", ".\n")
        print(outputs)
        print(output)
    else:
        # No healthy swarm: bail out before logging a transaction (prom is
        # undefined on this path, so falling through would raise a NameError).
        return "no model"

    # add_transaction endpoint: record the inference on the Unity-side API
    response = requests.post("https://ldhldh-api-for-unity.hf.space/run/predict_5", json={
        "data": [
            id,
            "inference",
            "### input:\n" + prom + "\n\n### output:\n" + output
        ]}).json()
    d = response["data"][0]
    return output
with gr.Blocks() as demo:
    count = 0
    aa = gr.Interface(
        fn=chat,
        inputs=["text", "text", "text"],
        outputs="text",
        # Description (Korean): "chat, returns the AI response; internally
        # creates a transaction."
        description="chat, ai ์๋ต์ ๋ฐํํฉ๋๋ค. ๋ด๋ถ์ ์ผ๋ก ํธ๋์ญ์ ์์ฑ. \n /run/predict",
    )

demo.queue(max_size=32).launch()
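# Hedged usage sketch: once the Space is running, the Interface above is
# reachable through the standard Gradio REST route (the description hints at
# /run/predict). A client call might look like this (the URL and argument
# values are illustrative, not confirmed by this file):
#
#   import requests
#   r = requests.post("https://<your-space>.hf.space/run/predict",
#                     json={"data": ["player1", "some_npc", "hello"]})
#   print(r.json()["data"][0])   # NPC reply, or "no model" / "no coin" / "no npc"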