Update app.py
app.py CHANGED
@@ -1,14 +1,11 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-
-
-model_text = "google/gemma-3-4b-it"
-#model_text = "google/gemma-2-27b-it"
+model_gemma_2 = "google/gemma-2-27b-it"
 
 client = InferenceClient()
 
-def fn_text(
+def fn_gemma_2(
     prompt,
     history,
     input,
@@ -18,34 +15,36 @@ def fn_text(
     top_p,
 ):
 
+    # System Prompt
     #messages = [{"role": "system", "content": system_prompt}]
     #history.append(messages[0])
     #messages.append({"role": "user", "content": prompt})
     #history.append(messages[1])
 
-
-
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                {
-                    "type": "text",
-                    "text": prompt
-                },
-                {
-                    "type": "image_url",
-                    "image_url": {
-                        "url": input
-                    }
-                }
-            ]
-        }
-    ]
+    messages = [{"role": "user", "content": prompt}]
     history.append(messages[0])
 
+    #messages = [
+    #    {
+    #        "role": "user",
+    #        "content": [
+    #            {
+    #                "type": "text",
+    #                "text": prompt
+    #            },
+    #            {
+    #                "type": "image_url",
+    #                "image_url": {
+    #                    "url": input
+    #                }
+    #            }
+    #        ]
+    #    }
+    #]
+    #history.append(messages[0])
+
     stream = client.chat.completions.create(
-        model = model_text,
+        model = model_gemma_2,
         messages = history,
         max_tokens = max_tokens,
         temperature = temperature,
@@ -58,8 +57,8 @@ def fn_text(
         chunks.append(chunk.choices[0].delta.content or "")
         yield "".join(chunks)
 
-app_text = gr.ChatInterface(
-    fn = fn_text,
+app_gemma_2 = gr.ChatInterface(
+    fn = fn_gemma_2,
     type = "messages",
     additional_inputs = [
         gr.Textbox(label="Input"),
@@ -68,11 +67,11 @@ app_text = gr.ChatInterface(
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P"),
     ],
-    title = "Google Gemma",
-    description = model_text,
+    title = "Google Gemma 2",
+    description = model_gemma_2,
 )
 
 app = gr.TabbedInterface(
-    [app_text],
-    ["Gemma"]
+    [app_gemma_2],
+    ["Gemma 2"]
 ).launch()
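
Note: the loop that drives the yield falls outside the diff context above (only the chunk-handling lines are visible), so here is a minimal standalone sketch of the streaming pattern fn_gemma_2 appears to use. The function name stream_chat, the "for chunk in stream" header, the stream=True flag, and the max_tokens default are assumptions; the temperature and top_p defaults mirror the slider values in the diff.

    from huggingface_hub import InferenceClient

    client = InferenceClient()

    def stream_chat(history, max_tokens=1024, temperature=0.7, top_p=0.95):
        # stream=True makes create() return an iterator of deltas;
        # the flag is assumed, since that line is not shown in the diff.
        stream = client.chat.completions.create(
            model="google/gemma-2-27b-it",
            messages=history,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        )
        chunks = []
        for chunk in stream:
            # Each delta may be None at the end of the stream, hence `or ""`.
            chunks.append(chunk.choices[0].delta.content or "")
            # Yield the accumulated text so gr.ChatInterface re-renders
            # the growing reply on every chunk.
            yield "".join(chunks)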