"""
# Inference

import gradio as gr

app = gr.load(
    "google/gemma-2-2b-it",
    src = "models",
    inputs = [gr.Textbox(label = "Input")],
    outputs = [gr.Textbox(label = "Output")],
    title = "Google Gemma",
    description = "Inference",
    examples = [
        ["Hello, World."]
    ]
).launch()
"""
"""
# Pipeline

import gradio as gr
from transformers import pipeline

pipe = pipeline(model = "google/gemma-2-2b-it")

def fn(input):
    output = pipe(
        input,
        max_new_tokens = 2048
    )
    return output[0]["generated_text"]#[len(input):]

app = gr.Interface(
    fn = fn,
    inputs = [gr.Textbox(label = "Input")],
    outputs = [gr.Textbox(label = "Output")],
    title = "Google Gemma",
    description = "Pipeline",
    examples = [
        ["Hello, World."]
    ]
).launch()
"""

# InferenceClient

import gradio as gr
from huggingface_hub import InferenceClient
import os

# HF_TOKEN must be set in the environment (e.g. as a Space secret).
token = os.getenv("HF_TOKEN")
client = InferenceClient(api_key=token)

messages = [
    {"role": "user", "content": "Tell me a story"}
]

stream = client.chat.completions.create(
    model="google/gemma-2-2b-it",
    messages=messages,
    temperature=0.5,
    max_tokens=2048,
    top_p=0.7,
    stream=True
)

for chunk in stream:
    # Each chunk carries an incremental delta; print without a trailing
    # newline so the tokens join into continuous text.
    content = chunk.choices[0].delta.content
    if content:
        print(content, end="")
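
# A minimal sketch of wiring the same stream into a Gradio chat UI instead
# of stdout; `chat_fn` and this exact wiring are illustrative assumptions,
# not part of the app above.
"""
# ChatInterface

def chat_fn(message, history):
    stream = client.chat.completions.create(
        model="google/gemma-2-2b-it",
        messages=[{"role": "user", "content": message}],
        temperature=0.5,
        max_tokens=2048,
        top_p=0.7,
        stream=True
    )
    text = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            text += delta
            yield text  # Gradio re-renders the partial reply on each yield

app = gr.ChatInterface(
    fn=chat_fn,
    title="Google Gemma",
    description="InferenceClient"
).launch()
"""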