import logging
import os

import httpx
import uvicorn
from fastapi import FastAPI, Request
from huggingface_hub import login
from langdetect import detect
from transformers import pipeline

app = FastAPI()

# API tokens are supplied via environment variables (e.g. Space secrets).
TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN in environment variables.")

# Authenticate with the Hugging Face Hub before loading the models.
login(token=HF_HUB_TOKEN)

if not TOKEN:
    raise ValueError("Missing Telegram token. Please set TELEGRAM_BOT_TOKEN in environment variables.")

# Sanity check: call Telegram's getMe endpoint to verify the bot token works.
try:
    response = httpx.get(f"https://api.telegram.org/bot{TOKEN}/getMe")
    print(f"Using TELEGRAM_TOKEN: {TOKEN[:5]}***")
    print(response.json())
except httpx.RequestError as e:
    print(f"Request failed: {e}")

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)

# Load one text-generation pipeline per supported language.
hebrew_generator = pipeline("text-generation", model="Norod78/hebrew-gpt_neo-small")
english_generator = pipeline("text-generation", model="distilgpt2")

def detect_language(user_input):
    """Return "hebrew", "english", or "unsupported" for the given text."""
    try:
        lang = detect(user_input)
        return "hebrew" if lang == "he" else "english" if lang == "en" else "unsupported"
    except Exception:
        return "unsupported"

def generate_response(text):
    """Generate a reply using the model that matches the detected language."""
    language = detect_language(text)
    if language == "hebrew":
        return hebrew_generator(text, max_length=100)[0]["generated_text"]
    elif language == "english":
        return english_generator(text, max_length=100)[0]["generated_text"]
    return "Sorry, I only support Hebrew and English."

@app.get("/") |
|
async def root(): |
|
return {"message": "Server is running on HF Spaces"} |
if __name__ == "__main__":
    # 7860 is the default port expected by Hugging Face Spaces.
    uvicorn.run(app, host="0.0.0.0", port=7860)
|