Zmypl committed on
Commit 2bccf44 · verified · 1 Parent(s): d71dd34

Update app.py

Files changed (1): app.py (+7 −12)
app.py CHANGED
@@ -142,20 +142,16 @@ def chat_with_model(input_sentence):
 
 
     print(apikey)
-    client = OpenAI(
-        api_key = apikey
-        base_url ="https://api-inference.huggingface.co/v1/",
-
-    )
+    from huggingface_hub import InferenceClient
+
+    client = InferenceClient(api_key="YOUR_HF_TOKEN")
 
     messages = [
-
-        { "role": "user", "content": "سلام" },
-
+        { "role": "user", "content": "Tell me a story" }
     ]
 
     stream = client.chat.completions.create(
-        model="Qwen/Qwen2.5-Coder-32B-Instruct",
+        model="Qwen/Qwen2.5-72B-Instruct",
         messages=messages,
         temperature=0.5,
         max_tokens=2048,
@@ -164,9 +160,8 @@ def chat_with_model(input_sentence):
     )
 
     for chunk in stream:
-        print(chunk.choices[0].delta.content)
-        return (chunk.choices[0].delta.content)
-
+        print(chunk.choices[0].delta.content)
+
 # رابط کاربری Gradio
 with gr.Blocks() as demo:
     gr.Markdown("## چت با مدل")
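
For context, this commit swaps the OpenAI-compatible client pointed at the Hugging Face inference endpoint for the `huggingface_hub` `InferenceClient`, replaces the hard-coded "سلام" ("Hello") prompt with "Tell me a story", and drops the `return` from the streaming loop, so `chat_with_model` now only prints chunks and returns nothing to the Gradio UI (the Persian comment reads "Gradio user interface" and the heading "Chat with the model"). Below is a minimal runnable sketch of what the updated function could look like with the user's input and a return value wired in. The model name, sampling parameters, and `client.chat.completions.create(...)` call come from the diff; reading the token from an `HF_TOKEN` environment variable and passing `stream=True` (implied by the `for chunk in stream` loop) are assumptions, not part of the commit.

```python
import os

from huggingface_hub import InferenceClient

# Assumption: the token is read from the environment instead of being hard-coded.
apikey = os.environ["HF_TOKEN"]
client = InferenceClient(api_key=apikey)


def chat_with_model(input_sentence):
    """Send one user message to the model and return the full streamed reply."""
    messages = [{"role": "user", "content": input_sentence}]

    # Parameters taken from the diff; stream=True is assumed, since the
    # original file iterates over the response with `for chunk in stream`.
    stream = client.chat.completions.create(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=messages,
        temperature=0.5,
        max_tokens=2048,
        stream=True,
    )

    # Accumulate the streamed chunks so the function returns the whole reply
    # instead of only printing it (the committed version returns None).
    reply = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            reply += delta
    return reply
```

Returning the accumulated string lets the Gradio blocks below bind `chat_with_model` directly to an output component, which the print-only version in the commit cannot do.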