Kev09 committed
Commit 86226b8 · verified · 1 Parent(s): 1b0a980

Update app.py

Files changed (1): app.py +8 -15
app.py CHANGED
@@ -5,27 +5,20 @@ def greet(name):
 
 from huggingface_hub import InferenceClient
 
-client = InferenceClient()
+
+# Use a pipeline as a high-level helper
+from transformers import pipeline
 
 messages = [
-    {
-        "role": "user",
-        "content": "What is the capital of France?"
-    }
+    {"role": "user", "content": "Who are you?"},
 ]
+pipe = pipeline("text-generation", model="meta-llama/Llama-3.1-8B-Instruct")
+pipe(messages)
 
-stream = client.chat.completions.create(
-    model="Qwen/Qwen2.5-Math-7B-Instruct",
-    messages=messages,
-    max_tokens=500,
-    stream=True
-)
 
-for chunk in stream:
-    print(chunk.choices[0].delta.content, end="")
 
 
 
 
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-demo.launch()
+#demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+#demo.launch()
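
For reference, a minimal sketch of app.py as it stands after this commit. Lines 1-4 (the greet function and, presumably, the gradio import) are not shown in this hunk, so their contents below are placeholder assumptions; everything else mirrors the diff:

import gradio as gr  # assumed: gr is referenced by the (now commented-out) demo lines

def greet(name):
    # placeholder body; the real implementation from lines 1-4 is not shown in this diff
    return "Hello " + name + "!"

from huggingface_hub import InferenceClient

# Use a pipeline as a high-level helper
from transformers import pipeline

messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe = pipeline("text-generation", model="meta-llama/Llama-3.1-8B-Instruct")
pipe(messages)

#demo = gr.Interface(fn=greet, inputs="text", outputs="text")
#demo.launch()

Note that with demo.launch() commented out, the script no longer serves a Gradio UI; it only runs the pipeline call once at startup.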