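# Streamlit chat app serving microsoft/Phi-3-mini-4k-instruct,
# loaded in 4-bit via bitsandbytes to keep memory use low.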
import streamlit as st
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
)
# 1. Model configuration
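# st.cache_resource caches the returned objects across Streamlit reruns,
# so the model and tokenizer are loaded only once per process.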
@st.cache_resource
def load_model():
    try:
        # Store weights in 4 bits and dequantize to float16 for compute,
        # cutting weight memory to roughly a quarter of the float16 size.
        quantization_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
        )
        model = AutoModelForCausalLM.from_pretrained(
            "microsoft/Phi-3-mini-4k-instruct",
            device_map="auto",
            quantization_config=quantization_config,
            trust_remote_code=True,
        )
        tokenizer = AutoTokenizer.from_pretrained(
            "microsoft/Phi-3-mini-4k-instruct"
        )
        return model, tokenizer
    except Exception as e:
        st.error(f"Error loading the model: {e}")
        return None, None
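# Note: bitsandbytes 4-bit quantization targets CUDA GPUs and is not
# available on Apple Silicon. A minimal sketch of an alternative load
# path for an M1 machine (an assumption, not part of the original app;
# requires the PyTorch MPS backend):
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "microsoft/Phi-3-mini-4k-instruct",
#         torch_dtype=torch.float16,
#         trust_remote_code=True,
#     ).to("mps")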
# 2. Streamlit interface
st.title("🤖 Chatbot Optimized for M1")
st.markdown("Using Microsoft Phi-3-mini - [Hugging Face](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct)")
# 3. Session initialization
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "Hi! I'm your AI assistant. How can I help you?"}
    ]
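# st.session_state persists across reruns, so the chat history
# accumulated below survives each interaction.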
# 4. Model loading
model, tokenizer = load_model()
if model is None or tokenizer is None:
    st.stop()  # Halt the app if the model failed to load
# 5. Generation function
def generate_response(prompt):
    try:
        messages = [
            {"role": "user", "content": prompt}
        ]
        # apply_chat_template wraps the message in the model's chat format
        # and returns the token ids as a tensor; add_generation_prompt=True
        # appends the assistant turn marker so the model starts replying.
        inputs = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt"
        ).to(model.device)
        outputs = model.generate(
            inputs,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
        # outputs[0] holds prompt + completion; slice off the prompt tokens
        # so only the newly generated text is decoded.
        return tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
    except Exception as e:
        return f"Error generating the response: {e}"
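# Streamlit reruns this entire script on every interaction; the loop
# below re-renders the saved history before handling new input.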
# 6. User interaction
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Type your message..."):
    # Show the user's input
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    # Generate the response
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_response(prompt)
            st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
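# Launch locally with: streamlit run <path-to-this-script>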