TuringsSolutions committed on
Commit
3f88864
·
verified ·
1 Parent(s): 8cd9c33

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -12
app.py CHANGED
@@ -12,7 +12,7 @@ model = LlavaForConditionalGeneration.from_pretrained(model_id).to("cpu")
12
  # Initialize inference clients
13
  client_gemma = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
14
 
15
- def llava(inputs, history):
16
  """Processes an image and text input using Llava."""
17
  image = Image.open(inputs["files"][0]).convert("RGB")
18
  prompt = f"<|im_start|>user <image>\n{inputs['text']}<|im_end|>"
@@ -23,18 +23,21 @@ def respond(message, history):
23
  """Generate a response based on text or image input."""
24
  if "files" in message and message["files"]:
25
  # Handle image + text input
26
- inputs = llava(message, history)
27
  streamer = TextIteratorStreamer(skip_prompt=True, skip_special_tokens=True)
28
  thread = Thread(target=model.generate, kwargs=dict(inputs=inputs, max_new_tokens=512, streamer=streamer))
29
  thread.start()
 
30
  buffer = ""
31
  for new_text in streamer:
32
  buffer += new_text
33
- yield buffer
 
 
34
  else:
35
  # Handle text-only input
36
  user_message = message["text"]
37
- history.append([user_message, None]) # Append user message to history
38
 
39
  # Prepare prompt for the language model
40
  prompt = [{"role": "user", "content": msg[0]} for msg in history if msg[0]]
@@ -42,8 +45,8 @@ def respond(message, history):
42
 
43
  # Extract response and update history
44
  bot_message = response["choices"][0]["message"]["content"]
45
- history[-1][1] = bot_message # Update the last entry with bot's response
46
- yield history
47
 
48
  def generate_image(prompt):
49
  """Generates an image based on the user prompt."""
@@ -57,20 +60,18 @@ with gr.Blocks() as demo:
57
  with gr.Column():
58
  text_input = gr.Textbox(placeholder="Enter your message...")
59
  file_input = gr.File(label="Upload an image")
60
- with gr.Column():
61
- output = gr.Image(label="Generated Image")
62
 
63
  def handle_text(text, history=[]):
64
  """Handle text input and generate responses."""
65
- return respond({"text": text}, history), history
66
 
67
  def handle_file_upload(files, history=[]):
68
  """Handle file uploads and generate responses."""
69
- return respond({"files": files, "text": "Describe this image."}, history), history
70
 
71
  # Connect components to callbacks
72
- text_input.submit(handle_text, [text_input, chatbot], [chatbot])
73
- file_input.change(handle_file_upload, [file_input, chatbot], [chatbot])
74
 
75
  # Launch the Gradio app
76
  demo.launch()
 
12
  # Initialize inference clients
13
  client_gemma = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
14
 
15
+ def llava(inputs):
16
  """Processes an image and text input using Llava."""
17
  image = Image.open(inputs["files"][0]).convert("RGB")
18
  prompt = f"<|im_start|>user <image>\n{inputs['text']}<|im_end|>"
 
23
  """Generate a response based on text or image input."""
24
  if "files" in message and message["files"]:
25
  # Handle image + text input
26
+ inputs = llava(message)
27
  streamer = TextIteratorStreamer(skip_prompt=True, skip_special_tokens=True)
28
  thread = Thread(target=model.generate, kwargs=dict(inputs=inputs, max_new_tokens=512, streamer=streamer))
29
  thread.start()
30
+
31
  buffer = ""
32
  for new_text in streamer:
33
  buffer += new_text
34
+ history[-1][1] = buffer # Update the latest message in history
35
+ yield history, history # Yield both chatbot and history for updating
36
+
37
  else:
38
  # Handle text-only input
39
  user_message = message["text"]
40
+ history.append([user_message, None]) # Add user's message with a placeholder response
41
 
42
  # Prepare prompt for the language model
43
  prompt = [{"role": "user", "content": msg[0]} for msg in history if msg[0]]
 
45
 
46
  # Extract response and update history
47
  bot_message = response["choices"][0]["message"]["content"]
48
+ history[-1][1] = bot_message # Update the latest message with bot's response
49
+ yield history, history # Yield both chatbot and history for updating
50
 
51
  def generate_image(prompt):
52
  """Generates an image based on the user prompt."""
 
60
  with gr.Column():
61
  text_input = gr.Textbox(placeholder="Enter your message...")
62
  file_input = gr.File(label="Upload an image")
 
 
63
 
64
  def handle_text(text, history=[]):
65
  """Handle text input and generate responses."""
66
+ return respond({"text": text}, history)
67
 
68
  def handle_file_upload(files, history=[]):
69
  """Handle file uploads and generate responses."""
70
+ return respond({"files": files, "text": "Describe this image."}, history)
71
 
72
  # Connect components to callbacks
73
+ text_input.submit(handle_text, [text_input, chatbot], [chatbot, chatbot])
74
+ file_input.change(handle_file_upload, [file_input, chatbot], [chatbot, chatbot])
75
 
76
  # Launch the Gradio app
77
  demo.launch()