import gradio as gr

import os
# Quote the requirement: an unquoted ">=4.37.0" is parsed by the shell as an
# output redirection, so the version pin is silently dropped.
os.system('pip install "transformers>=4.37.0"')
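# In a Hugging Face Space, the more conventional place for this pin is a
# requirements.txt entry ("transformers>=4.37.0") instead of a runtime install.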

def greet(name):
    # The Gradio text input is currently unused: every request runs the fixed
    # example question inside hello().
    return hello()

def hello():
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Using pandas to read some structured data
    import pandas as pd
    from io import StringIO
    
    # A single example table. The rows start at column 0 inside the
    # triple-quoted string; indenting them would put stray spaces before the
    # quote characters and break pandas' quoted-field parsing.
    EXAMPLE_CSV_CONTENT = """\
"Loss","Date","Score","Opponent","Record","Attendance"
"Hampton (14–12)","September 25","8–7","Padres","67–84","31,193"
"Speier (5–3)","September 26","3–1","Padres","67–85","30,711"
"Elarton (4–9)","September 22","3–1","@ Expos","65–83","9,707"
"Lundquist (0–1)","September 24","15–11","Padres","67–83","30,774"
"Hampton (13–11)","September 6","9–5","Dodgers","61–78","31,407"
"""
    
    csv_file = StringIO(EXAMPLE_CSV_CONTENT)
    df = pd.read_csv(csv_file)
    
    model_name = "tablegpt/TableGPT2-7B"
    
    model = AutoModelForCausalLM.from_pretrained(
        model_name, torch_dtype="auto", device_map="auto"
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name)
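
    # NOTE: hello() reloads the 7B model on every request. A minimal sketch of
    # caching it across calls (load_model is a hypothetical helper, assuming a
    # single-process app) could be:
    #
    #   from functools import lru_cache
    #
    #   @lru_cache(maxsize=1)
    #   def load_model():
    #       tok = AutoTokenizer.from_pretrained(model_name)
    #       mdl = AutoModelForCausalLM.from_pretrained(
    #           model_name, torch_dtype="auto", device_map="auto"
    #       )
    #       return mdl, tok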
    
    example_prompt_template = """Given access to several pandas dataframes, write the Python code to answer the user's question.

/*
"{var_name}.head(5).to_string(index=False)" as follows:
{df_info}
*/

Question: {user_question}
"""
    # "Which games had a record that reached 40 wins and 40 losses?"
    question = "ε“ͺδΊ›ζ―”θ΅›ηš„ζˆ˜η»©θΎΎεˆ°δΊ†40θƒœ40负?"
    
    prompt = example_prompt_template.format(
        var_name="df",
        df_info=df.head(5).to_string(index=False),
        user_question=question,
    )
    
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ]
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    
    generated_ids = model.generate(**model_inputs, max_new_tokens=512)
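    # Slice off the prompt tokens so only the newly generated tokens are
    # decoded below.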
    generated_ids = [
        output_ids[len(input_ids) :]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response


demo = gr.Interface(fn=greet, inputs="text", outputs="text")
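
# Generation with a 7B model is slow and usually single-stream; if concurrent
# requests are possible, demo.queue().launch() would serialize them.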
demo.launch()