|
from fastapi import FastAPI
from pydantic import BaseModel
import uvicorn
import prompt_style

from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch
|
def generate_1(item: "Item"):
    # Standalone example that runs a fixed pirate-chatbot conversation through
    # the module-level text-generation pipeline (`pipe`, defined below).
    messages = [
        {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"},
        {"role": "user", "content": "Who are you?"},
    ]

    # Stop on either the regular EOS token or Llama 3's end-of-turn token.
    terminators = [
        pipe.tokenizer.eos_token_id,
        pipe.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    ]

    outputs = pipe(
        messages,
        max_new_tokens=item.max_new_tokens,
        eos_token_id=terminators,
        do_sample=True,
        temperature=item.temperature,
        top_p=item.top_p,
    )
    # The pipeline returns the whole chat; the last entry is the assistant reply.
    return outputs[0]["generated_text"][-1]
|
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"

# Load the tokenizer and model once at startup rather than on every request.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Chat pipeline that reuses the already-loaded weights (used by generate_1).
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
|
class Item(BaseModel):
    prompt: str
    history: list
    system_prompt: str
    temperature: float = 0.6
    max_new_tokens: int = 1024
    top_p: float = 0.95
    repetition_penalty: float = 1.0  # assumed default; generate() reads this field
    seed: int = 42
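# Example request body for POST /generate/ (illustrative values only):
# {
#     "prompt": "What is the capital of France?",
#     "history": [["Hi", "Ahoy there!"]],
#     "system_prompt": "",
#     "temperature": 0.6,
#     "max_new_tokens": 256,
#     "top_p": 0.95,
#     "repetition_penalty": 1.0,
#     "seed": 42
# }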
|
|
|
app = FastAPI() |
|
|
|
def format_prompt(item: Item):
    messages = [
        {"role": "system", "content": prompt_style.data},
    ]
    # Each history entry is expected to be a (user_message, assistant_reply) pair.
    for user_msg, assistant_msg in item.history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    # Append the current user prompt as the final message.
    messages.append({"role": "user", "content": item.prompt})
    return messages
|
|
|
def generate(item: Item):
    temperature = float(item.temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(item.top_p)

    # model.generate() has no `seed` argument, so seed the RNG explicitly.
    torch.manual_seed(item.seed)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=item.max_new_tokens,
        top_p=top_p,
        repetition_penalty=item.repetition_penalty,
        do_sample=True,
    )

    formatted_prompt = format_prompt(item)

    input_ids = tokenizer.apply_chat_template(
        formatted_prompt, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    # Stop on either the regular EOS token or Llama 3's end-of-turn token.
    terminators = [
        tokenizer.eos_token_id,
        tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    ]

    outputs = model.generate(input_ids, eos_token_id=terminators, **generate_kwargs)
    # Keep only the newly generated tokens (everything after the prompt).
    response = outputs[0][input_ids.shape[-1]:]
    return tokenizer.decode(response, skip_special_tokens=True)
|
@app.post("/generate/")
async def generate_text(item: Item):
    ans = generate(item)
    return {"response": ans}
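

if __name__ == "__main__":
    # Run the API with uvicorn when this file is executed directly.
    # Host and port are illustrative defaults; adjust for your deployment.
    uvicorn.run(app, host="0.0.0.0", port=8000)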