mjavaid committed on
Commit
142a90d
·
1 Parent(s): add1057

first commit

Browse files
Files changed (2) hide show
  1. app.py +5 -17
  2. requirements.txt +1 -1
app.py CHANGED
@@ -16,35 +16,23 @@ pipe = pipeline(
16
  use_auth_token=hf_token
17
  )
18
@spaces.GPU
def generate_response(user_text, user_image, history):
    """Send the user's text (and optional PIL image) to the Gemma 3 pipeline.

    Builds an OpenAI-style message list with a fixed system prompt, runs the
    model, appends the ``(user_text, reply)`` pair to *history*, and returns
    the history twice (once for the Chatbot display, once for the State).
    """
    # Fixed system prompt prepended to every request.
    system_msg = {
        "role": "system",
        "content": [{"type": "text", "text": "You are a helpful assistant."}],
    }

    # Assemble the user turn from whichever inputs were actually provided.
    parts = []
    if user_image is not None:
        # The image input arrives as a PIL image from gr.Image(type="pil").
        parts.append({"type": "image", "image": user_image})
    if user_text:
        parts.append({"type": "text", "text": user_text})

    messages = [system_msg, {"role": "user", "content": parts}]

    # Run the pipeline; extraction follows the model-card snippet.
    model_out = pipe(text=messages, max_new_tokens=200)
    reply = model_out[0][0]["generated_text"][-1]["content"]

    # Record the exchange as a (user, assistant) tuple for the Chatbot.
    history.append((user_text, reply))
    return history, history
50
 
@@ -54,12 +42,12 @@ with gr.Blocks() as demo:
54
  "This interface lets you chat with the Gemma 3 model. "
55
  "You can type a message and optionally attach an image."
56
  )
57
- chatbot = gr.Chatbot()
58
  with gr.Row():
59
  txt = gr.Textbox(show_label=False, placeholder="Type your message here...", container=False)
60
- img = gr.Image(source="upload", tool="editor", type="pil", label="Attach an image (optional)")
 
61
  state = gr.State([])
62
 
63
- # When the user submits text (and possibly an image), call generate_response.
64
  txt.submit(generate_response, inputs=[txt, img, state], outputs=[chatbot, state])
65
- demo.launch()
 
16
  use_auth_token=hf_token
17
  )
18
@spaces.GPU
def generate_response(user_text, user_image, history):
    """Send the user's text (and optional PIL image) to the Gemma 3 pipeline.

    Builds an OpenAI-style message list with a fixed system prompt, runs the
    model, appends the new exchange to *history*, and returns the history
    twice (once for the Chatbot display, once for the State).

    Note: the UI uses gr.Chatbot(type="messages"), which requires history
    entries to be {"role": ..., "content": ...} dicts — appending
    (user, assistant) tuples raises a format error in Gradio. Fixed below.
    """
    messages = [
        {
            "role": "system",
            "content": [{"type": "text", "text": "You are a helpful assistant."}],
        }
    ]

    # Assemble the user turn from whichever inputs were actually provided.
    user_content = []
    if user_image is not None:
        # The image input arrives as a PIL image from gr.Image(type="pil").
        user_content.append({"type": "image", "image": user_image})
    if user_text:
        user_content.append({"type": "text", "text": user_text})
    messages.append({"role": "user", "content": user_content})

    # Run the pipeline; extraction follows the model-card snippet.
    output = pipe(text=messages, max_new_tokens=200)
    response = output[0][0]["generated_text"][-1]["content"]

    # messages-format Chatbot: one dict per turn, not a (user, bot) tuple.
    history.append({"role": "user", "content": user_text})
    history.append({"role": "assistant", "content": response})
    return history, history
38
 
 
42
  "This interface lets you chat with the Gemma 3 model. "
43
  "You can type a message and optionally attach an image."
44
  )
45
+ chatbot = gr.Chatbot(type="messages")
46
  with gr.Row():
47
  txt = gr.Textbox(show_label=False, placeholder="Type your message here...", container=False)
48
+ # Removed the 'source' parameter to avoid the error.
49
+ img = gr.Image(tool="editor", type="pil", label="Attach an image (optional)")
50
  state = gr.State([])
51
 
 
52
  txt.submit(generate_response, inputs=[txt, img, state], outputs=[chatbot, state])
53
+ demo.launch()
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- git+https://github.com/huggingface/[email protected]-release
2
  gradio
3
  torch
4
  Pillow
 
1
+ git+https://github.com/huggingface/[email protected]
2
  gradio
3
  torch
4
  Pillow