Spaces:
Sleeping
Sleeping
File size: 2,114 Bytes
8160127 a0a031a fd95400 5100159 7a16dda fd95400 a0a031a fd95400 0d81ce3 d9f7657 fd95400 7a16dda fd95400 7a16dda fd95400 a0a031a fd95400 5100159 a0a031a fd95400 5c113c1 a0a031a 25fc6c4 5e79494 1fa822a 5e79494 a0a031a 5c113c1 ecb38ea |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 |
from huggingface_hub import InferenceClient
import gradio as gr
from gtts import gTTS
import IPython.display as ipd
# Remote inference endpoint for the Mixtral instruct model on the HF Inference API.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
# Persona instructions (kept in Spanish — they are sent verbatim to the model).
system_prompt = "Tus principios básicos son el estoicismo bioético, ecología y el mindfullness, la ciencias holísticas, el activismo social y transhumanismo."
system_prompt_sent = False  # Tracks whether the system prompt has already been sent this session
def format_prompt(message, history):
    """Assemble a Mixtral-instruct prompt from past (user, bot) turns plus the new message.

    Each history turn becomes "[INST] user [/INST] bot</s> "; the new message
    is appended as a final open "[INST] ... [/INST]" segment.
    """
    segments = ["<s>"]
    for past_user, past_bot in history:
        segments.append(f"[INST] {past_user} [/INST]")
        segments.append(f" {past_bot}</s> ")
    segments.append(f"[INST] {message} [/INST]")
    return "".join(segments)
def generate(prompt, history, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
    """Stream a Mixtral completion for *prompt*, then voice the full reply via gTTS.

    Yields the accumulated output text as tokens arrive (Gradio streaming
    contract). On the first call of the process lifetime the system prompt is
    prepended to the user message; subsequent calls send the message alone.

    Args:
        prompt: The user's new message.
        history: List of (user, bot) turn pairs from the chat so far.
        temperature / max_new_tokens / top_p / repetition_penalty: Sampling
            parameters forwarded to the inference endpoint.
    """
    global system_prompt, system_prompt_sent
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2  # clamp: the endpoint misbehaves at/near zero temperature
    top_p = float(top_p)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
    # BUG FIX: the original condition was inverted — it skipped the system
    # prompt on the very first message (while marking it as sent) and then
    # attached it to every later message. Send it once, on the first call.
    if not system_prompt_sent:
        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
        system_prompt_sent = True
    else:
        formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
    output_text = ""
    for response in stream:
        output_text += response.token.text
        yield output_text
    # After streaming completes, synthesize the full reply as Spanish speech.
    tts = gTTS(output_text, lang='es')
    tts.save('output.mp3')
    # NOTE(review): IPython display runs in the server process, not the
    # browser — it likely has no visible effect inside a Gradio app; confirm
    # whether client-side audio playback (e.g. a gr.Audio output) was intended.
    ipd.display(ipd.Audio('output.mp3'))
    return output_text
# Gradio chat UI wired to the streaming `generate` function.
chat_interface = gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", height=700),
    concurrency_limit=2,  # at most two generations run concurrently
    theme="soft",
    retry_btn=None,   # hide retry/undo/clear — only the send button remains
    undo_btn=None,
    clear_btn=None,
    submit_btn="Enviar",
)
# BUG FIX: removed a stray trailing "|" after this call (extraction artifact)
# that made the file a SyntaxError.
chat_interface.launch(show_api=False)