Update app.py
app.py CHANGED
@@ -1,16 +1,21 @@
 import streamlit as st
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import torch
 import requests
 import os

 # Get the token from the secrets
 API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-Guard-2-8B"
-headers = {"Authorization": f"Bearer {os.getenv('
+headers = {"Authorization": f"Bearer {os.getenv('HF_API_TOKEN')}"}

 def query(payload):
     response = requests.post(API_URL, headers=headers, json=payload)
-
+    try:
+        response.raise_for_status()
+        return response.json()
+    except requests.exceptions.HTTPError as err:
+        st.error(f"HTTP error occurred: {err}")
+    except Exception as err:
+        st.error(f"Other error occurred: {err}")
+    return None

 st.title("LLaMA Chatbot")
 st.subheader("Ask anything to the LLaMA model!")
@@ -18,5 +23,19 @@ st.subheader("Ask anything to the LLaMA model!")
 user_input = st.text_input("You: ")
 if user_input:
     output = query({"inputs": user_input})
-
+
+    # Print the full response for debugging
+    st.write("API response:", output)
+
+    if output:
+        # Check the possible keys in the response
+        if "generated_text" in output:
+            response = output["generated_text"]
+        elif isinstance(output, list) and "generated_text" in output[0]:
+            response = output[0]["generated_text"]
+        else:
+            response = "Sorry, I couldn't generate a response."
+    else:
+        response = "Sorry, I couldn't generate a response."
+
     st.write(f"Chatbot: {response}")
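For reference, a minimal standalone sketch (not part of this commit) of the same call-and-parse flow, runnable outside Streamlit. It assumes HF_API_TOKEN is set in the environment and that the Inference API returns either a dict or a list of dicts carrying a generated_text field, which is what the updated app.py checks for; the prompt string is purely illustrative.

import os
import requests

API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-Guard-2-8B"
headers = {"Authorization": f"Bearer {os.getenv('HF_API_TOKEN')}"}  # token read from the environment

def query(payload):
    # Same request as app.py: POST the payload and fail loudly on HTTP errors.
    response = requests.post(API_URL, headers=headers, json=payload)
    response.raise_for_status()
    return response.json()

if __name__ == "__main__":
    output = query({"inputs": "Hello!"})  # example prompt, purely illustrative
    # The API may return a dict or a list of dicts; handle both, as app.py does.
    if isinstance(output, dict) and "generated_text" in output:
        print(output["generated_text"])
    elif isinstance(output, list) and output and "generated_text" in output[0]:
        print(output[0]["generated_text"])
    else:
        print("Unexpected response shape:", output)

Running something like this first makes it easy to confirm the token and the response shape before relying on the parsing inside the Streamlit UI.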