from functools import lru_cache
from typing import Annotated

import torch
from fastapi import FastAPI, Form
from fastapi.responses import FileResponse
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
app = FastAPI()


@app.get("/", response_class=FileResponse)
async def root():
    """Serve the static home page file."""
    home_page = "home.html"
    return home_page
@lru_cache(maxsize=1)
def _load_gemma():
    """Load the Gemma-2b-it tokenizer/model pair once and cache it.

    Loading the model takes seconds and a large amount of memory; the
    original handler reloaded it from disk on every request, so each
    call paid that full cost. ``lru_cache`` makes the load happen once.

    Returns:
        tuple: ``(tokenizer, model)``.
    """
    tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
    model = AutoModelForCausalLM.from_pretrained(
        "google/gemma-2b-it",
        device_map="auto",  # let HF place the model (GPU when available)
        torch_dtype=torch.bfloat16,
    )
    return tokenizer, model


@app.post("/hello/")
async def say_hello(msg: Annotated[str, Form()]):
    """Generate a Gemma-2b completion for the posted form field ``msg``.

    Args:
        msg: Prompt text submitted as form data.

    Returns:
        dict: ``{"message": <decoded generation>}`` including the prompt,
        since ``max_length`` bounds prompt + completion combined.
    """
    tokenizer, model = _load_gemma()
    # Move the encoded prompt to wherever device_map="auto" placed the
    # model; the original hard-coded .to("cpu"), which raises a device
    # mismatch whenever the model lands on a GPU.
    input_ids = tokenizer(msg, return_tensors="pt").to(model.device)
    outputs = model.generate(**input_ids, max_length=500)
    return {"message": tokenizer.decode(outputs[0])}