# LLaMA chatbot — Streamlit app for a Hugging Face Space.
# (Removed non-code page-status text "Spaces: Sleeping" left over from
# scraping the Space's web page; it was never part of the program.)
import os

import requests
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# Read the API token from the environment (a Spaces secret).
# Hugging Face Inference API endpoint for the hosted model.
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-Guard-2-8B"
# If the env var is unset, getenv returns None and this becomes
# "Bearer None", which the API will reject with 401.
# NOTE(review): 'YOUR_HUGGING_FACE_TOKEN' looks like a placeholder name —
# confirm the actual secret name configured for the Space (often HF_TOKEN).
headers = {"Authorization": f"Bearer {os.getenv('YOUR_HUGGING_FACE_TOKEN')}"}
def query(payload): | |
response = requests.post(API_URL, headers=headers, json=payload) | |
return response.json() | |
st.title("LLaMA Chatbot") | |
st.subheader("Ask anything to the LLaMA model!") | |
user_input = st.text_input("You: ") | |
if user_input: | |
output = query({"inputs": user_input}) | |
response = output.get("generated_text", "Sorry, I couldn't generate a response.") | |
st.write(f"Chatbot: {response}") | |