import uvicorn
import logging
import os
from fastapi import FastAPI, Request
import httpx
from transformers import pipeline
from langdetect import detect
from huggingface_hub import login
app = FastAPI()
# Global variables
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
# Verify Hugging Face token
if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN in environment variables.")
login(token=HF_HUB_TOKEN)
# Configure logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Load Hebrew and English text generation models
hebrew_generator = pipeline("text-generation", model="Norod78/hebrew-gpt_neo-small")
english_generator = pipeline("text-generation", model="distilgpt2")
# Function to detect language
def detect_language(user_input):
    try:
        lang = detect(user_input)
        return "hebrew" if lang == "he" else "english" if lang == "en" else "unsupported"
    except Exception:
        # langdetect raises an exception on empty or undecidable input
        return "unsupported"
# Function to generate a response
def generate_response(text):
    language = detect_language(text)
    if language == "hebrew":
        return hebrew_generator(text, max_length=100)[0]["generated_text"]
    elif language == "english":
        return english_generator(text, max_length=100)[0]["generated_text"]
    return "Sorry, I only support Hebrew and English."
@app.get("/")
async def root():
return {"message": "Server is running on HF Spaces"}
if __name__ == "__main__":
    # Run the FastAPI app with uvicorn on the port HF Spaces expects (7860)
    uvicorn.run(app, host="0.0.0.0", port=7860)