"""FastAPI service exposing a PEFT-adapted Mistral-7B text-to-SQL model.

Loads the base Mistral-7B-Instruct model, applies the fine-tuned LoRA
adapter, and serves generation through a ``text-generation`` pipeline.

NOTE: model weights are downloaded and loaded at import time, so startup
is slow and memory-heavy; this is intentional so requests never pay the
load cost, but it means the module cannot be imported without the
weights being available.
"""

from fastapi import FastAPI
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

ADAPTER_ID = "frankmorales2020/Mistral-7B-text-to-sql-flash-attention-2-dataeval"
BASE_MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.3"

app = FastAPI()

# Load the adapter config, the base causal LM, then attach the LoRA adapter.
config = PeftConfig.from_pretrained(ADAPTER_ID)
base_model = AutoModelForCausalLM.from_pretrained(BASE_MODEL_ID)
model = PeftModel.from_pretrained(base_model, ADAPTER_ID)

# Use the transformers tokenizer for the base model: pipeline() requires a
# PreTrainedTokenizer, which mistral_common's MistralTokenizer is not.
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID)

# "text2sql" is not a registered pipeline task and would raise KeyError at
# startup; a causal LM fine-tuned for SQL generation is served via
# "text-generation".
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)


@app.get("/")
def home():
    """Health-check / landing route."""
    return {"message": "Hello World"}


@app.get("/generate")
def generate(text: str):
    """Generate SQL (as free text) from the natural-language prompt *text*.

    Returns ``{"output": <generated_text>}`` where ``generated_text`` is
    the first sequence produced by the pipeline.
    """
    output = pipe(text)
    return {"output": output[0]["generated_text"]}