# Source: Hugging Face Space "app.py" by DeMaking (commit 32107e6, ~2.07 kB)
import uvicorn
import logging
import os
from fastapi import FastAPI, Request
import httpx
from transformers import pipeline
from langdetect import detect
from huggingface_hub import login
app = FastAPI()

# Configure logging BEFORE any startup messages so they are all captured
# (the original configured logging after already printing).
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO,
)
logger = logging.getLogger(__name__)

# Required secrets, injected via environment variables.
TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")  # Telegram bot token
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")  # Hugging Face API token

# Fail fast when a required token is missing.
if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN in environment variables.")
login(token=HF_HUB_TOKEN)

if not TOKEN:
    raise ValueError("Missing Telegram token. Please set TELEGRAM_BOT_TOKEN in environment variables.")
try:
    # Sanity-check the Telegram token against the Bot API. The explicit
    # timeout keeps startup from hanging if Telegram is unreachable
    # (the original call had no timeout).
    response = httpx.get(f"https://api.telegram.org/bot{TOKEN}/getMe", timeout=10)
    # Log only a prefix of the secret, never the full token.
    logger.info("Using TELEGRAM_TOKEN: %s***", TOKEN[:5])
    logger.info("getMe response: %s", response.json())
except httpx.RequestError as e:
    # Fixed garbled "aaRequest failed" message from the original.
    logger.error("Telegram getMe request failed: %s", e)
# Text-generation pipelines: one Hebrew model, one English model.
def _build_text_generator(model_name):
    """Create a text-generation pipeline for the given model id."""
    return pipeline("text-generation", model=model_name)


hebrew_generator = _build_text_generator("Norod78/hebrew-gpt_neo-small")
english_generator = _build_text_generator("distilgpt2")
def detect_language(user_input):
    """Classify *user_input* as "hebrew", "english", or "unsupported".

    Uses langdetect's ``detect``; any detection failure (e.g. empty or
    ambiguous text raises LangDetectException) is treated as unsupported
    rather than propagating.
    """
    try:
        lang = detect(user_input)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return "unsupported"
    if lang == "he":
        return "hebrew"
    if lang == "en":
        return "english"
    return "unsupported"
def generate_response(text):
    """Generate a reply for *text* using the generator matching its language."""
    # Dispatch table instead of an if/elif chain; looked up per call so the
    # module-level generators are resolved exactly as before.
    generators = {
        "hebrew": hebrew_generator,
        "english": english_generator,
    }
    chosen = generators.get(detect_language(text))
    if chosen is None:
        return "Sorry, I only support Hebrew and English."
    return chosen(text, max_length=100)[0]["generated_text"]
@app.get("/")
async def root():
    """Health-check endpoint confirming the server is up."""
    status_payload = {"message": "Server is running on HF Spaces"}
    return status_payload
if __name__ == "__main__":
    # Start the ASGI server directly; HF Spaces serves the app on port 7860.
    uvicorn.run(app, host="0.0.0.0", port=7860)