GeminiFan207 committed
Commit a5e50a2 · verified · 1 Parent(s): 0ed8f16

Create web_api.py

Files changed (1):
  1. web_api.py +116 -0
web_api.py ADDED
@@ -0,0 +1,116 @@
from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import logging
import aiofiles
import json
from typing import List, Optional
from datetime import datetime

# Configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

# Initialize FastAPI app
app = FastAPI()

# Enable CORS (Cross-Origin Resource Sharing)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Serve static files (HTML/CSS/JavaScript)
app.mount("/static", StaticFiles(directory="static"), name="static")

# Load AI model and tokenizer
MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"  # Replace with your AI model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16)

# In-memory storage for search history
search_history = []

# Pydantic models
class InferenceRequest(BaseModel):
    prompt: str
    max_length: Optional[int] = 100

class InferenceResponse(BaseModel):
    generated_text: str
    timestamp: str

class SearchHistoryResponse(BaseModel):
    history: List[InferenceResponse]

# API Endpoints
@app.post("/inference", response_model=InferenceResponse)
async def run_inference(request: InferenceRequest):
    """Run inference using the AI model."""
    try:
        inputs = tokenizer(request.prompt, return_tensors="pt")
        outputs = model.generate(inputs.input_ids, max_length=request.max_length)
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Log the search
        search_entry = InferenceResponse(
            generated_text=generated_text,
            timestamp=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        )
        search_history.append(search_entry)

        logger.info(f"Inference completed for prompt: {request.prompt}")
        return search_entry
    except Exception as e:
        logger.error(f"Error during inference: {e}")
        raise HTTPException(status_code=500, detail="Failed to run inference.")

@app.get("/search-history", response_model=SearchHistoryResponse)
async def get_search_history():
    """Get the history of all searches."""
    return SearchHistoryResponse(history=search_history)

# WebSocket for real-time interaction
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    try:
        while True:
            data = await websocket.receive_text()
            request = json.loads(data)
            prompt = request.get("prompt")
            max_length = request.get("max_length", 100)

            if not prompt:
                await websocket.send_text(json.dumps({"error": "Prompt is required."}))
                continue

            # Run inference
            inputs = tokenizer(prompt, return_tensors="pt")
            outputs = model.generate(inputs.input_ids, max_length=max_length)
            generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

            # Send response
            response = {
                "generated_text": generated_text,
                "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            }
            await websocket.send_text(json.dumps(response))

    except WebSocketDisconnect:
        logger.info("WebSocket disconnected.")
    except Exception as e:
        logger.error(f"WebSocket error: {e}")
        await websocket.send_text(json.dumps({"error": str(e)}))

# Serve frontend
@app.get("/", response_class=HTMLResponse)
async def serve_frontend():
    """Serve the frontend HTML file (returned as HTML, not a JSON string)."""
    async with aiofiles.open("static/index.html", mode="r") as file:
        return await file.read()
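For completeness, a minimal client sketch for exercising the two inference paths in this file. The uvicorn invocation in the comment, the host and port, the client file name, and the payload values are illustrative assumptions, not part of this commit:

# client_example.py — a minimal sketch, assuming the server was started with
# `uvicorn web_api:app --port 8000` and that the `requests` and `websockets`
# packages are installed. File name and payload values are hypothetical.
import asyncio
import json

import requests
import websockets

# One-shot inference over REST (POST /inference)
resp = requests.post(
    "http://localhost:8000/inference",
    json={"prompt": "Hello, world", "max_length": 50},
    timeout=300,  # generation on a large model can be slow
)
print(resp.json())  # {"generated_text": "...", "timestamp": "..."}

# The same request over the WebSocket endpoint (/ws)
async def ws_demo():
    async with websockets.connect("ws://localhost:8000/ws") as ws:
        await ws.send(json.dumps({"prompt": "Hello, world", "max_length": 50}))
        print(json.loads(await ws.recv()))

asyncio.run(ws_demo())

The same JSON shape works on both transports because the WebSocket handler pulls `prompt` and `max_length` out of each decoded message, mirroring the fields of InferenceRequest.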