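# Gradio chat demo for the Google Gemma models, served through the
# Hugging Face InferenceClient, with an optional transcript-screenshot
# feature backed by a companion Space.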
import gradio as gr
from gradio_client import Client
from huggingface_hub import InferenceClient
import random
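# Companion Space used by get_screenshot() to render the chat as an image.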
ss_client = Client("https://omnibus-html-image-current-tab.hf.space/")

models=[
    "google/gemma-7b",
    "google/gemma-7b-it",
    "google/gemma-2b",
    "google/gemma-2b-it"
]
# One InferenceClient per model, selected by index from the Models dropdown.
clients = [InferenceClient(m) for m in models]

def compress_history(history, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem):
    # client_choice comes from a Dropdown with type='index', so it is already
    # a 0-based index into clients.
    client = clients[int(client_choice)]
    COMPRESS_HISTORY = """You are an Information Summarizer Agent. Your duty is to summarize the following information into a more concise format with far fewer words.
    Retain all the main points and provide a brief and concise summary of the conversation.
    Conversation:
    {history}"""
    print("COMPRESSING")
    # Summarize only the last chat_mem turns; gr.Number yields a float,
    # so cast before slicing.
    formatted_prompt = COMPRESS_HISTORY.format(history=history[-int(chat_mem):])
    generate_kwargs = dict(
        temperature=temp,
        max_new_tokens=1024,
        top_p=top_p,
        repetition_penalty=rep_p,
        do_sample=True,
        seed=seed,
    )
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
    return output
    
def format_prompt(message, history):
    # Build a Gemma-style chat prompt, per the instruction-tuned template:
    # <start_of_turn>userHow does the brain work?<end_of_turn><start_of_turn>model
    # Earlier turns are wrapped in the same template so the model sees a
    # consistent conversation format.
    prompt = ""
    if history:
        for user_prompt, bot_response in history:
            prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
            prompt += f"<start_of_turn>model{bot_response}<end_of_turn>"
    prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
    return prompt
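# A minimal sketch of the resulting prompt for a single turn (assuming the
# Gemma instruction-tuned template above):
#   format_prompt("Hi", [])
#   -> "<start_of_turn>userHi<end_of_turn><start_of_turn>model"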


def chat_inf(system_prompt, prompt, history, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem):
    # Gemma's context window is 8192 tokens; in_len below is a rough
    # character-count proxy used to guard against overlong prompts.
    hist_len = 0
    client = clients[int(client_choice)]
    if not history:
        history = []
    if not memory:
        # gr.State() starts as None; normalize so slicing and appending work.
        memory = []
    for ea in memory[-int(chat_mem):]:
        hist_len += len(str(ea))
    print(hist_len)
    in_len = len(system_prompt + prompt) + hist_len

    print("\n######### HIST " + str(in_len))
    print("\n######### TOKENS " + str(tokens))
    if (in_len+tokens) > 8000:
        history.append((prompt, "The prompt is too long; please reduce the Chat Memory or Max new tokens value."))
        yield history,memory
        #hist=compress_history(history,client_choice,seed,temp,tokens,top_p,rep_p,chat_mem)
        #yield [(prompt,"History has been compressed, processing request...")]
        #history.append((prompt,hist))
    else:
        generate_kwargs = dict(
            temperature=temp,
            max_new_tokens=tokens,
            top_p=top_p,
            repetition_penalty=rep_p,
            do_sample=True,
            seed=seed,
        )
        formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[-int(chat_mem):])
        print("\n######### PROMPT "+str(len(formatted_prompt)))
        stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
        output = ""
        for response in stream:
            output += response.token.text
            # Keep earlier turns visible while the new response streams in.
            yield history + [(prompt, output)], memory
        history.append((prompt,output))
        memory.append((prompt,output))
        yield history,memory

def get_screenshot(chat: list, height=5000, width=600, chatblock=[], theme="light", wait=3000, header=True):
    print(chatblock)
    # The screenshot Space returns several files; when specific chat blocks
    # are selected the rendered image is taken from index 3 of the result,
    # otherwise from index 0.
    tog = 0
    if chatblock:
        tog = 3
    result = ss_client.predict(str(chat), height, width, chatblock, header, theme, wait, api_name="/run_script")
    out = f'https://omnibus-html-image-current-tab.hf.space/file={result[tog]}'
    print(out)
    return out



def clear_fn():
    return None, None, None

rand_val = random.randint(1, 1111111111111111)

def check_rand(inp, val):
    # Re-roll the seed when "Random Seed" is checked; otherwise keep the
    # user-supplied value.
    if inp:
        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1, 1111111111111111))
    else:
        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))


    
with gr.Blocks() as app:
    # Cross-callback store holding the full (prompt, response) memory.
    memory = gr.State()
    gr.HTML("""<center><h1 style='font-size:xx-large;'>Google Gemma Models</h1><br><h3>running on Huggingface Inference Client</h3><br><h6>EXPERIMENTAL</h6></center>""")
    chat_b = gr.Chatbot(height=500)
    with gr.Group():
        with gr.Row():
            with gr.Column(scale=3):
                inp = gr.Textbox(label="Prompt")
                sys_inp = gr.Textbox(label="System Prompt (optional)")
                with gr.Row():
                    with gr.Column(scale=2):
                        btn = gr.Button("Chat")
                    with gr.Column(scale=1):
                        with gr.Group():
                            stop_btn=gr.Button("Stop")
                            clear_btn=gr.Button("Clear")                
                client_choice = gr.Dropdown(label="Models", type='index', choices=models, value=models[0], interactive=True)

            with gr.Column(scale=1):
                with gr.Group():
                    rand = gr.Checkbox(label="Random Seed", value=True)
                    seed=gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,step=1, value=rand_val)
                    tokens = gr.Slider(label="Max new tokens", value=3840, minimum=0, maximum=8000, step=64, interactive=True, visible=True, info="Maximum number of new tokens to generate")
                    temp=gr.Slider(label="Temperature",step=0.01, minimum=0.01, maximum=1.0, value=0.9)
                    top_p=gr.Slider(label="Top-P",step=0.01, minimum=0.01, maximum=1.0, value=0.9)
                    rep_p=gr.Slider(label="Repetition Penalty",step=0.1, minimum=0.1, maximum=2.0, value=1.0)
                    chat_mem=gr.Number(label="Chat Memory", info="Number of previous chats to retain",value=5)
        with gr.Accordion(label="Screenshot",open=False):
            with gr.Row():
                with gr.Column(scale=3):
                    im_btn=gr.Button("Screenshot")
                    img=gr.Image(type='filepath')
                with gr.Column(scale=1):
                    with gr.Row():
                        im_height=gr.Number(label="Height",value=5000)
                        im_width=gr.Number(label="Width",value=500)
                    wait_time=gr.Number(label="Wait Time",value=3000)
                    theme=gr.Radio(label="Theme", choices=["light","dark"],value="light")
                    chatblock = gr.Dropdown(label="Chatblocks", info="Choose specific blocks of chat", choices=list(range(1, 40)), multiselect=True)
            


    # Event wiring: screenshot rendering, Enter-to-submit, the Chat button,
    # Stop (cancels any in-flight generation), and Clear.
    im_go = im_btn.click(get_screenshot, [chat_b, im_height, im_width, chatblock, theme, wait_time], img)
    chat_sub = inp.submit(check_rand, [rand, seed], seed).then(chat_inf, [sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem], [chat_b, memory])
    go = btn.click(check_rand, [rand, seed], seed).then(chat_inf, [sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem], [chat_b, memory])

    stop_btn.click(None, None, None, cancels=[go, im_go, chat_sub])
    clear_btn.click(clear_fn, None, [inp, sys_inp, chat_b])
app.queue(default_concurrency_limit=10).launch()
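# To run locally (assuming this file is saved as app.py and the gradio,
# gradio_client, and huggingface_hub packages are installed):
#   python app.py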