# app.py (root)
from pathlib import Path

import gradio as gr

from inference import chat_completion  # your back-end helpers
from tavily_search import enhance_query_with_search
from utils import (extract_text_from_file, extract_website_content,
                   history_to_messages, apply_search_replace_changes,
                   remove_code_block, parse_transformers_js_output,
                   format_transformers_js_output)
from models import AVAILABLE_MODELS, find_model
from deploy import send_to_sandbox

# ---------- backend callback -------------
def generate(payload):
    prompt = payload["prompt"]
    model = find_model(payload["model_id"])
    lang = payload["language"]
    history = payload.get("history", [])
    # … same logic as before, returns {"code": ..., "history": [...]}
    # (keep your existing generate() from the compact app.py)
    return {"code": code, "history": history}

# ---------- UI ---------------------------
INDEX = Path("static/index.html").read_text(encoding="utf-8")

with gr.Blocks(css="body{margin:0}", title="AnyCoder AI") as demo:
    # custom front-end served as raw HTML
    gr.HTML(INDEX)

    # hidden API endpoint: invisible JSON components wired to generate()
    # through an invisible button, exposed under api_name="predict"
    # (the exact REST route depends on the Gradio version)
    payload_in = gr.JSON(label="hidden", visible=False)
    result_out = gr.JSON(label="hidden", visible=False)
    run_btn = gr.Button(visible=False)
    run_btn.click(generate, inputs=payload_in, outputs=result_out, api_name="predict")

if __name__ == "__main__":
    demo.launch()
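Because generate() is registered under api_name="predict", the endpoint can be driven programmatically, not only from the custom front-end. A minimal sketch using gradio_client, assuming the app is running locally on the default port and with a placeholder model id (substitute one from AVAILABLE_MODELS):

# call_predict.py - smoke-test the hidden endpoint (sketch; values are placeholders)
from gradio_client import Client

client = Client("http://127.0.0.1:7860")  # local server started by demo.launch()
result = client.predict(
    {
        "prompt": "Build a responsive landing page",
        "model_id": "your-model-id",   # placeholder: use an id from AVAILABLE_MODELS
        "language": "html",
        "history": [],
    },
    api_name="/predict",
)
print(result["code"][:200])            # generated code, truncated for display
print(len(result["history"]), "history turns")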