import os
# Point the Hugging Face cache at a writable directory *before* importing
# transformers, which reads these variables at import time. TRANSFORMERS_CACHE
# is legacy; recent transformers versions derive the cache path from HF_HOME.
os.environ["HF_HOME"] = "/tmp/huggingface"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface"
import torch
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
# Model setup
MODEL_NAME = "google/gemma-2b" # Smaller, CPU-friendly model
DEVICE = "cpu"
# Note: bitsandbytes 4-bit quantization requires a CUDA GPU and raises an
# error on a CPU-only runtime, so the model is loaded in plain float32 instead.
# Load model & tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float32,
    device_map="cpu"
)
model.eval()
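# If a CUDA GPU is available, the original 4-bit setup can be restored; a
# minimal sketch (requires the bitsandbytes package, untested on this
# CPU-only runtime):
#
#     from transformers import BitsAndBytesConfig
#     quantization_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_compute_dtype=torch.float16,
#         bnb_4bit_use_double_quant=True,
#     )
#     model = AutoModelForCausalLM.from_pretrained(
#         MODEL_NAME,
#         quantization_config=quantization_config,
#         device_map="auto",
#     )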
# Set generation config
model.generation_config = GenerationConfig.from_pretrained(MODEL_NAME)
model.generation_config.pad_token_id = model.generation_config.eos_token_id
# FastAPI app
app = FastAPI()
# Request payload
class TextGenerationRequest(BaseModel):
    prompt: str
    max_tokens: int = Field(default=100, ge=1, le=512)  # Bound generation length to keep CPU latency manageable
@app.post("/generate")
async def generate_text(request: TextGenerationRequest):
    try:
        inputs = tokenizer(request.prompt, return_tensors="pt").to(DEVICE)
        # Run generation without gradient tracking to cut memory use and latency
        with torch.inference_mode():
            outputs = model.generate(**inputs, max_new_tokens=request.max_tokens, do_sample=True)
        # Note: the decoded output includes the original prompt text
        result = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"generated_text": result}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
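# Usage sketch (the filename "app.py" is an assumption; Hugging Face Spaces
# conventionally serves on port 7860):
#   uvicorn app:app --host 0.0.0.0 --port 7860
# Example request:
#   curl -X POST http://localhost:7860/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Once upon a time", "max_tokens": 50}'
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)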