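# Streamlit demo comparing code generation across CodeParrot, InCoder and CodeGen
# by calling each model's Hugging Face Space through its Gradio API endpoint.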
import json
import os
import pandas as pd
import requests
import threading
import streamlit as st
from datasets import load_dataset, load_metric

MODELS = ["CodeParrot", "InCoder", "CodeGen", "PolyCoder"]
GENERATION_MODELS = ["CodeParrot", "InCoder", "CodeGen"]


@st.cache()
def load_examples():
    with open("utils/examples.json", "r") as f:
        examples = json.load(f)
    return examples
    
    
def load_evaluation():
    # load problem 2 of HumanEval and the code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = "1"
    human_eval = load_dataset("openai_humaneval")
    entry_point = f"check({human_eval['test'][2]['entry_point']})"
    test_func = "\n" + human_eval["test"][2]["test"] + "\n" + entry_point
    code_eval = load_metric("code_eval")
    return code_eval, test_func


def read_markdown(path):
    with open(path, "r") as f:
        output = f.read()
    st.markdown(output, unsafe_allow_html=True)


def generate_code(
    generations, model_name, gen_prompt, max_new_tokens, temperature, seed
):
    # call space using its API endpoint
    url = (
        f"https://hf.space/embed/codeparrot/{model_name.lower()}-subspace/+/api/predict/"
    )
    r = requests.post(
        url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]}
    )
    generated_text = r.json()["data"][0]
    generations.append({model_name: generated_text})


def generate_code_threads(
    generations, models, gen_prompt, max_new_tokens, temperature, seed
):
    threads = []
    for model_name in models:
        # create and start one thread per model so the Space API calls run in parallel
        threads.append(
            threading.Thread(
                target=generate_code,
                args=(
                    generations,
                    model_name,
                    gen_prompt,
                    max_new_tokens,
                    temperature,
                    seed,
                ),
            )
        )
        threads[-1].start()

    for t in threads:
        t.join()


# cache a short CodeParrot completion (8 new tokens, temperature 0.2) used as a teaser
@st.cache(show_spinner=False)
def generate_teaser(gen_prompt):
    generations = []
    generate_code(generations, "CodeParrot", gen_prompt, 8, 0.2, 42)
    return generations[0]["CodeParrot"]


st.set_page_config(page_icon=":laptop:", layout="wide")


# Introduction
st.title("Generate code online 🤗")

# Code generation
st.subheader("Generate code online ✨")

col1, col2, col3 = st.columns([7, 1, 6])
with col1:
    st.markdown("**Models**")
    selected_models = st.multiselect(
        "Select code generation models to compare:",
        GENERATION_MODELS,
        default=GENERATION_MODELS,
        key=3,
    )
    st.markdown(" ")
    st.markdown("**Examples**")
    examples = load_examples()
    example_names = [example["name"] for example in examples]
    name2id = {name: i for i, name in enumerate(example_names)}
    selected_example = st.selectbox(
        "Select one of the following examples or implement yours:", example_names
    )
    example_text = examples[name2id[selected_example]]["value"]
    default_length = examples[name2id[selected_example]]["length"]
with col3:
    st.markdown("**Generation settings**")
    temperature = st.slider(
        "Temperature:", value=0.2, min_value=0.1, step=0.1, max_value=2.0
    )
    max_new_tokens = st.slider(
        "Number of tokens to generate:",
        value=default_length,
        min_value=8,
        step=4,
        max_value=256,
    )
    seed = st.slider("Random seed:", value=42, min_value=0, step=1, max_value=1000)
gen_prompt = st.text_area(
    "Generate code with prompt:",
    value=example_text,
    height=200,
).strip()
if st.button("Generate code!", key=4):
    with st.spinner("Generating code..."):
        # use threading
        generations = []
        generate_code_threads(
            generations,
            selected_models,
            gen_prompt=gen_prompt,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            seed=seed,
        )
        # display each selected model's generation once its request has completed
        for model_name in selected_models:
            for generation in generations:
                if model_name in generation:
                    st.markdown(f"**{model_name}**")
                    st.code(generation[model_name])
        if len(generations) < len(selected_models):
            st.markdown("<span style='color:red'>Warning: some models ran into a timeout. Try again later or reduce the number of tokens to generate. You can also try generating code using the original subspaces: [InCoder](https://huggingface.co/spaces/loubnabnl/incoder-subspace), [CodeGen](https://huggingface.co/spaces/loubnabnl/codegen-subspace), [CodeParrot](https://huggingface.co/spaces/loubnabnl/codeparrot-subspace)</span>", unsafe_allow_html=True)
            st.markdown("<span style='color:red'>Warning: Some models run into timeout, try another time or reduce the Number of tokens to generate. You can also try generating code using the original subspaces: [InCoder](https://huggingface.co/spaces/loubnabnl/incoder-subspace), [CodeGen](https://huggingface.co/spaces/loubnabnl/codegen-subspace), [CodeParrot](https://huggingface.co/spaces/loubnabnl/codeparrot-subspace)</span>", unsafe_allow_html=True)