# app.py — Decision Helper API (Hugging Face Space, commit aac15bb)
import logging
import os
from fastapi import FastAPI, Request
from contextlib import asynccontextmanager
from transformers import pipeline
import langid
from huggingface_hub import login
import socket
# --- Module-level setup (runs once at import time) ---
# Hugging Face API token, read from the environment.
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
# Fail fast if the token is missing — model downloads below require auth.
if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN in environment variables.")
login(token=HF_HUB_TOKEN)
# Text-generation pipelines for the two supported languages.
# NOTE(review): loading happens at import time and downloads models on first run.
hebrew_generator = pipeline("text-generation", model="Norod78/hebrew-gpt_neo-small")
english_generator = pipeline("text-generation", model="distilgpt2")
def detect_language(user_input):
    """Classify *user_input* and return "hebrew", "english", or "unsupported".

    Any detection failure is treated as "unsupported" rather than raised,
    so callers get a plain string in every case.
    """
    try:
        # langid.classify returns a (language_code, confidence) tuple.
        lang, _ = langid.classify(user_input)
    except Exception as e:
        # Best-effort: detection errors must not crash the request path.
        logging.error("Language detection error: %s", e)
        return "unsupported"
    logging.info("Detected language: %s", lang)
    if lang == "he":
        return "hebrew"
    if lang == "en":
        return "english"
    return "unsupported"
# Generate a continuation for the input text with the matching language model.
def generate_response(text):
    """Generate a text continuation for *text* using the detected-language model.

    Returns the model's generated text, or a fixed apology string when the
    language is neither Hebrew nor English.
    """
    language = detect_language(text)
    logging.info("Detected language: %s", language)
    # Dispatch table avoids duplicating the generate-and-unpack logic per branch.
    generators = {"hebrew": hebrew_generator, "english": english_generator}
    generator = generators.get(language)
    if generator is None:
        return "Sorry, I only support Hebrew and English."
    output = generator(text, max_length=100, truncation=True)
    logging.debug("Model output: %s", output)
    # Pipeline returns a list of dicts; the generated string is under this key.
    return output[0]["generated_text"]
# FastAPI lifespan hook: code before `yield` runs at startup, after at shutdown.
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Log application startup and shutdown around the app's lifetime."""
    logging.info("Starting application...")
    yield  # Application serves requests while suspended here.
    logging.info("Shutting down application...")
# Create the FastAPI app, wiring in the startup/shutdown lifespan hook.
app = FastAPI(lifespan=lifespan)
@app.get("/")
async def root():
return {"message": "Decision Helper API is running!"}
# @app.post("/generate_response")
# async def generate_text(request: Request):
# try:
# data = await request.json()
# text = data.get("text", "").strip() # removes non-relevant spaces
# if not text:
# return {"error": "No text provided"}
# response = generate_response(text)
# return {"response": response}
# except Exception as e:
# logging.error(f"Error processing request: {e}")
# return {"error": "Invalid request. Please send JSON with a 'text' field."}
# @app.post("/generate_response")
# async def generate_text(request: Request):
# try:
# data = await request.json()
# logging.info(f"Received request: {data}") # Log the request data
# text = data.get("text", "").strip() # removes non-relevant spaces
# if not text:
# return {"error": "No text provided"}
# response = generate_response(text)
# logging.info(f"Generated response: {response}") # Log the response
# return {"response": response}
# except Exception as e:
# logging.error(f"Error processing request: {e}")
# return {"error": "Invalid request. Please send JSON with a 'text' field."}
@app.post("/generate_response")
async def generate_text(request: Request):
try:
data = await request.json()
if not data or "text" not in data:
logging.error("Received an empty or invalid request")
return {"error": "Invalid request. Please send JSON with a 'text' field."}
text = data["text"].strip()
if not text:
return {"error": "No text provided"}
response = generate_response(text)
return {"response": response}
except Exception as e:
logging.error(f"Error processing request: {e}")
return {"error": "Invalid request. Please send JSON with a 'text' field."}
# Run the server when executed directly.
# NOTE(review): port 7860 looks like the conventional Hugging Face Spaces
# port — confirm against the deployment target.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)