suayptalha committed on
Commit
a7527fb
·
verified ·
1 Parent(s): 1c449ee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -15
app.py CHANGED
@@ -20,14 +20,30 @@ def describe_image(image, user_message):
20
  history.append(f"Assistant: {description}")
21
 
22
  full_conversation = "\n".join(history)
23
- llama_result = llama_client.chat_completion(
24
- messages=[{"role": "user", "content": full_conversation}],
25
- max_tokens=512,
26
- temperature=0.7,
27
- top_p=0.95
28
- )
 
 
 
 
 
 
29
 
30
- return description + "\n\nAssistant: " + llama_result['choices'][0]['message']['content']
 
 
 
 
 
 
 
 
 
 
31
 
32
  def chat_or_image(image, user_message):
33
  global history
@@ -35,14 +51,7 @@ def chat_or_image(image, user_message):
35
  return describe_image(image, user_message)
36
  else:
37
  history.append(f"User: {user_message}")
38
- full_conversation = "\n".join(history)
39
- llama_result = llama_client.chat_completion(
40
- messages=[{"role": "user", "content": full_conversation}],
41
- max_tokens=512,
42
- temperature=0.7,
43
- top_p=0.95
44
- )
45
- return llama_result['choices'][0]['message']['content']
46
 
47
  demo = gr.Interface(
48
  fn=chat_or_image,
 
20
  history.append(f"Assistant: {description}")
21
 
22
  full_conversation = "\n".join(history)
23
+ return respond(user_message, history, "System: Describe the image.", 512, 0.7, 0.95)
24
+
25
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Stream a chat completion for *message*, yielding the reply so far.

    Args:
        message: The latest user message to send to the model.
        history: Prior turns as (user, assistant) pairs; falsy entries are
            skipped.  NOTE(review): other code in this file appends plain
            strings like ``f"User: {...}"`` to ``history``, in which case
            ``val[0]``/``val[1]`` would index single characters — confirm
            the format at the call sites.
        system_message: System prompt prepended to the conversation.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling threshold.

    Yields:
        str: The accumulated response text after each streamed chunk.
    """
    messages = [{"role": "system", "content": system_message}]

    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    response = ""
    # Use a distinct loop variable: the original rebound `message`, shadowing
    # the function parameter inside the loop.
    for chunk in llama_client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Streamed deltas can carry content=None (e.g. role-only or final
        # chunks); appending None to a str would raise TypeError.
        if token:
            response += token
        yield response
47
 
48
  def chat_or_image(image, user_message):
49
  global history
 
51
  return describe_image(image, user_message)
52
  else:
53
  history.append(f"User: {user_message}")
54
+ return respond(user_message, history, "System: Answer the user's question.", 512, 0.7, 0.95)
 
 
 
 
 
 
 
55
 
56
  demo = gr.Interface(
57
  fn=chat_or_image,