Update app.py
app.py CHANGED

@@ -9,20 +9,20 @@ client = InferenceClient(model)
 def fn_text(
     prompt,
     history,
-    system_prompt,
+    #system_prompt,
     max_tokens,
     temperature,
     top_p,
 ):
-    #messages = [{"role": "
+    #messages = [{"role": "system", "content": system_prompt}]
     #history.append(messages[0])
+
+    #messages.append({"role": "user", "content": prompt})
+    #history.append(messages[1])
 
-    messages = [{"role": "
+    messages = [{"role": "user", "content": prompt}]
     history.append(messages[0])
 
-    messages.append({"role": "user", "content": prompt})
-    history.append(messages[1])
-
     stream = client.chat.completions.create(
         model = model,
         messages = history,

@@ -41,7 +41,7 @@ app_text = gr.ChatInterface(
     fn = fn_text,
     type = "messages",
     additional_inputs = [
-        gr.Textbox(value="You are a helpful assistant.", label="System Prompt"),
+        #gr.Textbox(value="You are a helpful assistant.", label="System Prompt"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max Tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P"),

@@ -117,30 +117,4 @@ app = gr.ChatInterface(
 )
 
 if __name__ == "__main__":
-    app.launch()
-
-# Pipeline
-
-import gradio as gr
-from transformers import pipeline
-
-pipe = pipeline(model = "google/gemma-2-2b-it")
-
-def fn(input):
-    output = pipe(
-        input,
-        max_new_tokens = 2048
-    )
-    return output[0]["generated_text"]#[len(input):]
-
-app = gr.Interface(
-    fn = fn,
-    inputs = [gr.Textbox(label = "Input")],
-    outputs = [gr.Textbox(label = "Output")],
-    title = "Google Gemma",
-    description = "Pipeline",
-    examples = [
-        ["Hello, World."]
-    ]
-).launch()
-"""
+    app.launch()
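For reference, below is a minimal sketch of what the chat part of app.py looks like after this commit, assuming the lines outside the hunks follow the usual huggingface_hub InferenceClient plus Gradio ChatInterface pattern. The model id, the streaming yield loop, and the final launch block are assumptions added for illustration; only the lines shown in the diff above come from the actual file.

# Sketch of the post-commit chat app. Everything not shown in the diff hunks
# (imports, model id, the streaming loop, the launch block) is assumed,
# not taken from the Space.
import gradio as gr
from huggingface_hub import InferenceClient

model = "HuggingFaceH4/zephyr-7b-beta"  # placeholder: the real model id is not visible in the diff
client = InferenceClient(model)

def fn_text(
    prompt,
    history,
    #system_prompt,
    max_tokens,
    temperature,
    top_p,
):
    # After this commit only the user turn is appended to the running history;
    # the system-prompt handling is commented out.
    messages = [{"role": "user", "content": prompt}]
    history.append(messages[0])

    stream = client.chat.completions.create(
        model = model,
        messages = history,
        max_tokens = max_tokens,
        temperature = temperature,
        top_p = top_p,
        stream = True,
    )

    # Accumulate streamed deltas and yield the partial reply so the
    # ChatInterface renders it incrementally (assumed, standard streaming pattern).
    text = ""
    for chunk in stream:
        text += chunk.choices[0].delta.content or ""
        yield text

app_text = gr.ChatInterface(
    fn = fn_text,
    type = "messages",
    additional_inputs = [
        #gr.Textbox(value="You are a helpful assistant.", label="System Prompt"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max Tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P"),
    ],
)

if __name__ == "__main__":
    app_text.launch()

Note that gr.ChatInterface passes additional_inputs positionally after the prompt and history, so commenting out the System Prompt Textbox has to go together with commenting out the system_prompt parameter in fn_text; keeping those two lists in step is what the changes above do.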