# Streamlit chatbot app that queries a Hugging Face Inference API model.
import streamlit as st
import requests
import os

# Read the Hugging Face token from the environment (set via Space secrets).
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-Guard-2-8B"
headers = {"Authorization": f"Bearer {os.getenv('YOUR_HUGGING_FACE_TOKEN')}"}
def query(payload):
    """POST *payload* to the Hugging Face Inference API and return the parsed JSON.

    Shows a Streamlit error message and returns None on any request failure.

    Args:
        payload: JSON-serializable dict sent as the request body
            (e.g. {"inputs": "..."}).

    Returns:
        The decoded JSON response, or None if the request failed.
    """
    try:
        # The POST itself must be inside the try: connection errors and
        # timeouts raise before raise_for_status() is ever reached.
        # A timeout keeps the Streamlit script from hanging forever.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as err:
        st.error(f"HTTP error occurred: {err}")
    except Exception as err:
        st.error(f"Other error occurred: {err}")
    return None
st.title("LLaMA Chatbot")
st.subheader("Ask anything to the LLaMA model!")

user_input = st.text_input("You: ")

if user_input:
    output = query({"inputs": user_input})
    # Print the full response for debugging
    st.write("API response:", output)

    # The Inference API may answer with either a dict or a list of dicts;
    # extract "generated_text" from whichever shape came back. Checking
    # isinstance first avoids `in` doing list-membership on a list response,
    # and the truthiness guard avoids IndexError on an empty list.
    response = "Sorry, I couldn't generate a response."
    if isinstance(output, dict) and "generated_text" in output:
        response = output["generated_text"]
    elif isinstance(output, list) and output and "generated_text" in output[0]:
        response = output[0]["generated_text"]
    st.write(f"Chatbot: {response}")