import torch
import os
import requests
import spaces
import gradio as gr
api_token = os.environ.get("TOKEN")
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
headers = {"Authorization": f"Bearer {api_token}"}
@spaces.GPU
def query(payload):
    """Send the payload to the Hugging Face Inference API and return the parsed JSON response."""
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()


def analyze_sentiment(text):
    prompt = (
        "You're a sentiment analyzer. Your role is to evaluate the general feeling of the prompt. "
        "Answer only with 'positive' or 'negative'. Don't add any explanations. "
        f"Here's the text to analyze (don't add any text): {text}"
    )
    output = query({
        "inputs": prompt,
    })
    # Handle the API output carefully: a successful text-generation call returns a list of results.
    if isinstance(output, list) and len(output) > 0:
        return output[0].get("generated_text", "Error: unexpected response")
    else:
        return "Error: unexpected response from the API"
demo = gr.Interface(
    fn=analyze_sentiment,
    inputs=["text"],
    outputs=["text"],
)

demo.launch()