suayptalha committed on
Commit
942dab0
·
verified ·
1 Parent(s): ce928ca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -4,7 +4,7 @@ from huggingface_hub import InferenceClient
4
 
5
  moondream_client = Client("vikhyatk/moondream2")
6
 
7
- llama_client = InferenceClient("Qwen/QwQ-32B-Preview")
8
 
9
  history = []
10
 
@@ -23,14 +23,14 @@ def describe_image(image, user_message):
23
  history.append(f"Assistant: {description}")
24
 
25
  full_conversation = "\n".join(history)
26
- llama_result = llama_client.chat_completion(
27
  messages=[{"role": "user", "content": full_conversation}],
28
  max_tokens=512,
29
  temperature=0.7,
30
  top_p=0.95
31
  )
32
 
33
- return description + "\n\nAssistant: " + llama_result['choices'][0]['message']['content']
34
 
35
  def chat_or_image(image, user_message):
36
  global history
@@ -40,13 +40,13 @@ def chat_or_image(image, user_message):
40
  else:
41
  history.append(f"User: {user_message}")
42
  full_conversation = "\n".join(history)
43
- llama_result = llama_client.chat_completion(
44
  messages=[{"role": "user", "content": full_conversation}],
45
  max_tokens=512,
46
  temperature=0.7,
47
  top_p=0.95
48
  )
49
- return llama_result['choices'][0]['message']['content']
50
 
51
  demo = gr.Interface(
52
  fn=chat_or_image,
 
4
 
5
  moondream_client = Client("vikhyatk/moondream2")
6
 
7
+ qwq_client = InferenceClient("Qwen/QwQ-32B-Preview")
8
 
9
  history = []
10
 
 
23
  history.append(f"Assistant: {description}")
24
 
25
  full_conversation = "\n".join(history)
26
+ qwq_result = qwq_client.chat_completion(
27
  messages=[{"role": "user", "content": full_conversation}],
28
  max_tokens=512,
29
  temperature=0.7,
30
  top_p=0.95
31
  )
32
 
33
+ return description + "\n\nAssistant: " + qwq_result['choices'][0]['message']['content']
34
 
35
  def chat_or_image(image, user_message):
36
  global history
 
40
  else:
41
  history.append(f"User: {user_message}")
42
  full_conversation = "\n".join(history)
43
+ qwq_result = qwq_client.chat_completion(
44
  messages=[{"role": "user", "content": full_conversation}],
45
  max_tokens=512,
46
  temperature=0.7,
47
  top_p=0.95
48
  )
49
+ return qwq_result['choices'][0]['message']['content']
50
 
51
  demo = gr.Interface(
52
  fn=chat_or_image,