```python
from fastapi import FastAPI, Request, Body
from huggingface_hub import InferenceClient
import random

API_URL = "https://api-inference.huggingface.co/models/"  # not used below

client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1"
)

app = FastAPI()


def format_prompt(message, history):
    # Build a Mistral-instruct prompt: each past turn is wrapped in
    # [INST] ... [/INST] followed by the bot response, then the new message.
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt


@app.post("/generate")  # decorator added so the app is runnable; the path is an assumption
def generate_text(request: Request, prompt: str = Body()):
    history = []  # chat history is not persisted here; populate as needed

    # Header values arrive as strings, so cast them before passing them on.
    temperature = float(request.headers.get("temperature", 0.5))
    top_p = float(request.headers.get("top_p", 0.95))
    repetition_penalty = float(request.headers.get("repetition_penalty", 1.0))

    formatted_prompt = format_prompt(prompt, history)

    response = client.text_generation(
        formatted_prompt,
        temperature=temperature,
        max_new_tokens=512,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=random.randint(0, 10**7),
        stream=False,  # for token streaming, set stream=True and iterate over the chunks
        details=True,
        return_full_text=True,
    )
    # With details=True the client returns a TextGenerationOutput object;
    # return just the generated text so the response serializes cleanly.
    return response.generated_text
```
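For quick verification, here is a minimal client-side sketch. It assumes the app is saved as `main.py`, launched with `uvicorn main:app --port 8000`, and exposed at `POST /generate` (the route path assumed above); none of this is shown in the original snippet.

```python
# Hypothetical usage sketch: exercises the endpoint above, assuming it runs
# locally on port 8000 (e.g. via `uvicorn main:app --port 8000`).
import requests

resp = requests.post(
    "http://127.0.0.1:8000/generate",
    # `prompt: str = Body()` expects the JSON body to be a bare string.
    json="Explain FastAPI in one sentence.",
    headers={
        # Sampling parameters travel as HTTP headers, hence the string values.
        "temperature": "0.7",
        "top_p": "0.95",
        "repetition_penalty": "1.0",
    },
)
print(resp.status_code, resp.json())
```

Since `format_prompt` is called with an empty `history`, this request reaches the model as `<s>[INST] Explain FastAPI in one sentence. [/INST]`.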