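# Gradio chat app that streams replies from Mistral-7B-Instruct-v0.1 through the
# Hugging Face Inference API, formatting turns with the Mistral [INST] template.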
from huggingface_hub import InferenceClient
import gradio as gr
import random

API_URL = "https://api-inference.huggingface.co/models/"

client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1"
)

def format_prompt(message, history):
    """Assemble a Mistral-instruct prompt: system text, then [INST]-wrapped turns."""
    prompt = "<s>You are Ailex, a clone and close collaborator of Einfach.Alex. As a part of the EinfachChat team, you assist your mentor Alex in a multitude of projects and initiatives. Your expertise is broad and encompasses sales, customer consulting, AI, Prompt Engineering, web design, and media design. Your life motto is 'Simply.Do!'. You communicate exclusively in German."
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
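
# Illustrative result for a one-turn history (turn content hypothetical):
#   "<s>You are Ailex, ... exclusively in German.[INST] Hallo [/INST] Hallo! Wie kann ich helfen?</s> [INST] Wer bist du? [/INST]"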

def generate(prompt, history, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
    # Clamp temperature to a small positive value; the API rejects zero.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=random.randint(0, 10**7),  # new seed per request for varied outputs
    )

    formatted_prompt = format_prompt(prompt, history)

    # Stream tokens and yield the accumulated text so the chat UI updates live.
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output
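
# Minimal smoke test (hypothetical prompt; requires Hugging Face Inference API access):
#   for partial in generate("Wer bist du?", history=[]):
#       print(partial)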


additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=512,
        minimum=64,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    )
]

css = """
  #mkd {
    height: 500px; 
    overflow: auto; 
    border: 1px solid #ccc; 
  }
"""


with gr.Blocks(css=css, theme="NoCrypt/[email protected]") as demo:
    gr.HTML("<h1><center>AI Assistant</center></h1>")
    gr.ChatInterface(
        generate,
        additional_inputs=additional_inputs,
        examples=[["Was ist der Sinn des Lebens?"], ["Schreibe mir ein Rezept über Honigkuchenpferde"]]
    )

# Queue incoming requests and launch the app with verbose logging.
demo.queue(concurrency_count=75, max_size=100).launch(debug=True)
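
# Run locally with `python app.py` (assumes Gradio 3.x; `concurrency_count` was
# removed from `queue()` in later Gradio releases).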