"""Decision Helper API: a small FastAPI service that generates text
continuations in Hebrew or English using Hugging Face pipelines."""

import logging
import os
import socket
from contextlib import asynccontextmanager

from fastapi import FastAPI, Request
from huggingface_hub import login
from langdetect import detect
from transformers import pipeline

logger = logging.getLogger(__name__)

# Hugging Face access token, required to authenticate model downloads.
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Fail fast at startup rather than at the first model pull.
if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN in environment variables.")

login(token=HF_HUB_TOKEN)

# Text-generation pipelines, loaded once at import time (expensive: both
# models are downloaded/loaded before the server can accept requests).
hebrew_generator = pipeline("text-generation", model="Norod78/hebrew-gpt_neo-small")
english_generator = pipeline("text-generation", model="distilgpt2")


def detect_language(user_input: str) -> str:
    """Classify *user_input* as "hebrew", "english", or "unsupported".

    Any detection failure (langdetect raises on empty or non-linguistic
    input) is treated as "unsupported" rather than propagated.
    """
    try:
        lang = detect(user_input)
    except Exception:  # was a bare except; narrowed so signals like Ctrl-C still propagate
        return "unsupported"
    if lang == "he":
        return "hebrew"
    if lang == "en":
        return "english"
    return "unsupported"


def generate_response(text: str) -> str:
    """Generate a model continuation of *text* in its detected language.

    Returns a fixed apology string for languages other than Hebrew/English.
    """
    language = detect_language(text)
    if language == "hebrew":
        return hebrew_generator(text, max_length=100)[0]["generated_text"]
    if language == "english":
        return english_generator(text, max_length=100)[0]["generated_text"]
    return "Sorry, I only support Hebrew and English."


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Log application startup and shutdown around the serving window."""
    # Use the module logger instead of print(): logging was imported but unused.
    logger.info("Starting application...")
    yield  # Wait until the app closes
    logger.info("Shutting down application...")


app = FastAPI(lifespan=lifespan)


@app.get("/")
async def root():
    """Liveness probe."""
    return {"message": "Decision Helper API is running!"}


@app.post("/generate_response")
async def generate_text(request: Request):
    """Generate a continuation for the JSON body ``{"text": ...}``.

    Returns ``{"error": ...}`` when no text is supplied, otherwise
    ``{"response": <generated text>}``.
    """
    data = await request.json()
    text = data.get("text", "")
    if not text:
        return {"error": "No text provided"}
    return {"response": generate_response(text)}


# Run the server directly (development entry point).
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)