from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
from langchain_community.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Model setup (small enough to CPU-serve in a Space)
MODEL_ID = "bigcode/starcoder2-3b"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, trust_remote_code=True)
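# Note: a 3B-parameter model loaded in full fp32 needs roughly 12 GB of RAM
# (3B params x 4 bytes). Passing torch_dtype=torch.bfloat16 to from_pretrained
# (an optional tweak, not in the original) roughly halves that footprint,
# assuming the host CPU supports bf16.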

# wrap in a HF pipeline and a LangChain LLM
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=64,
    # greedy decoding keeps responses deterministic; temperature/top_p are
    # only honored when do_sample=True, so they are dropped here
    do_sample=False,
)
llm = HuggingFacePipeline(pipeline=pipe)
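
# Optional sanity check before wiring up the API (the prompt text here is
# illustrative only; the completion depends on the model):
#   print(llm.invoke(
#       "### Convert English description to an Emmet abbreviation\n"
#       "Description: a div with id main\n"
#       "Emmet:"
#   ))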

# define a simple prompt -> chain
prompt = PromptTemplate(
    input_variables=["description"],
    template=(
        "### Convert English description to an Emmet abbreviation\n"
        "Description: {description}\n"
        "Emmet:"
    ),
)
chain = LLMChain(llm=llm, prompt=prompt)
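
# For example, the description "a nav with five links" renders the prompt as:
#   ### Convert English description to an Emmet abbreviation
#   Description: a nav with five links
#   Emmet:
# and the model is expected to complete something like "nav>a*5"
# (the exact abbreviation depends on the model's output).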

# FastAPI app
app = FastAPI()

class Req(BaseModel):
    description: str

class Res(BaseModel):
    emmet: str

@app.post("/generate-emmet", response_model=Res)
def generate_emmet(req: Req):
    # plain (non-async) endpoint: FastAPI runs it in a threadpool, so the
    # blocking model call does not stall the event loop
    raw = chain.run(req.description)
    # keep just the first non-empty line of the completion
    lines = [line for line in raw.strip().splitlines() if line.strip()]
    emmet = lines[0].strip() if lines else ""
    return {"emmet": emmet}
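
# Local smoke test, a minimal sketch assuming this file is saved as app.py,
# uvicorn is installed, and the Space uses the usual port 7860 (adjust for
# your environment):
#
#   curl -X POST http://localhost:7860/generate-emmet \
#        -H "Content-Type: application/json" \
#        -d '{"description": "an unordered list with three items"}'
#
# which should return JSON of the form {"emmet": "..."}.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)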