# Source: Hugging Face Space file "app.py" (commit dced53b).
# Web-page chrome from the file viewer ("raw / history / blame", author,
# file size) has been removed so this file parses as Python.
# app.py on Hugging Face Space
import gradio as gr
import requests # Used for making HTTP requests to your backend
import os
# --- IMPORTANT ---
# Replace this with your actual Cloudflare Worker URL after deployment.
# You can also set this as a Hugging Face Space secret if you prefer.
# URL of the Cloudflare Worker that proxies requests to the LLM. Read from the
# environment (e.g. a Space secret) with a placeholder default that must be
# replaced before the app can return real advice.
BACKEND_API_URL = os.getenv("BACKEND_API_URL", "https://your-worker-name.your-username.workers.dev")
# Example: https://actor-llm-deepseek-backend.your-username.workers.dev
# Store conversation history for context
# NOTE: module-level mutable state — shared across ALL users of this process,
# not per-browser-session; reset whenever the script or character changes.
conversation_history = []
# Last script / character seen, used to detect a context switch.
current_script_in_session = ""
current_character_in_session = ""
def get_actor_advice(user_query, script_input, character_name_input):
    """Send the actor's question (plus script context) to the backend LLM.

    Parameters
    ----------
    user_query : str
        The acting-advice question typed by the user.
    script_input : str
        The full script text pasted into the UI.
    character_name_input : str
        Name of the character the user is playing.

    Returns
    -------
    str
        The LLM's advice on success, otherwise a human-readable error
        message (the Gradio callback must always return a string).

    Side effects: appends the exchange to the module-level
    ``conversation_history`` and resets it when the script or character
    changes.
    """
    global conversation_history, current_script_in_session, current_character_in_session

    # 1. Reset the conversation context when the script or character changes,
    # so stale history from a previous session does not leak into this one.
    if script_input != current_script_in_session or character_name_input != current_character_in_session:
        conversation_history = []  # Reset history
        current_script_in_session = script_input
        current_character_in_session = character_name_input
        gr.Warning("Script or character changed! Conversation context has been reset.")

    # 2. Prepare payload for the Cloudflare Worker
    payload = {
        "userQuery": user_query,
        "scriptContent": script_input,
        "characterName": character_name_input,
        "conversationHistory": conversation_history,  # prior turns for context
    }
    headers = {"Content-Type": "application/json"}
    try:
        # 3. POST to the Cloudflare Worker. FIX: a timeout is required —
        # without one an unreachable/slow backend hangs this callback (and
        # the UI) forever. Timeouts raise requests.exceptions.RequestException
        # subclasses, so they are handled by the existing error branch below.
        response = requests.post(BACKEND_API_URL, json=payload, headers=headers, timeout=60)
        response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
        response_data = response.json()
        llm_response = response_data.get("response", "No advice received.")

        # 4. Record both sides of the exchange only on success, so a failed
        # request does not pollute the context sent with the next call.
        conversation_history.append({"role": "user", "content": user_query})
        conversation_history.append({"role": "assistant", "content": llm_response})
        return llm_response
    except requests.exceptions.RequestException as e:
        print(f"Error communicating with backend: {e}")
        return f"Error connecting to the backend. Please ensure the backend is deployed and accessible. Details: {e}"
    except Exception as e:
        # Last-resort boundary handler (e.g. non-JSON body from the worker):
        # report to the UI instead of crashing the Gradio callback.
        print(f"An unexpected error occurred: {e}")
        return f"An unexpected error occurred: {e}"
# --- Frontend UI with Gradio ---
# `demo` is kept as the top-level Blocks name (Hugging Face Spaces convention).
with gr.Blocks() as demo:
    gr.Markdown("# Actor's LLM Assistant")
    gr.Markdown("Enter your script and ask for acting advice for your character. The AI will remember past queries in the current session.")

    with gr.Row():
        # Left column: script context plus a cosmetic photo upload.
        with gr.Column():
            script_box = gr.Textbox(
                label="Paste Your Script Here",
                lines=10,
                placeholder="[Scene: A dimly lit stage...]\nANNA: (Whispering) 'I can't believe this...'",
            )
            character_box = gr.Textbox(
                label="Your Character's Name",
                placeholder="e.g., Anna",
            )
            # Photo customization placeholder (UI-only; not sent to the LLM).
            actor_photo = gr.Image(
                label="Upload Actor Photo (for UI personalization)",
                type="pil",  # deliver the upload as a Pillow image object
                sources=["upload"],
                interactive=True,
            )
            gr.Markdown("*(Note: Photo customization is for UI personalization. The LLM itself currently processes text only.)*")

        # Right column: question input, submit button, and advice output.
        with gr.Column():
            question_box = gr.Textbox(
                label="Ask for Acting Advice",
                placeholder="e.g., How should Anna deliver her line 'I can't believe this...' to convey despair?",
                lines=3,
            )
            advice_button = gr.Button("Get Advice")
            advice_output = gr.Textbox(label="LLM Advice", lines=7)

    # Wire the button to the backend call; inputs map positionally onto
    # get_actor_advice(user_query, script_input, character_name_input).
    advice_button.click(
        fn=get_actor_advice,
        inputs=[question_box, script_box, character_box],
        outputs=advice_output,
    )

    gr.Markdown("---")
    gr.Markdown("Powered by DeepSeek LLMs, Hugging Face, and Cloudflare.")

demo.launch(share=True)