zamal committed on
Commit
6da457d
·
verified ·
1 Parent(s): 07783a5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -14
app.py CHANGED
@@ -3,15 +3,6 @@ from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
3
  from PIL import Image
4
  import torch
5
 
6
- import os
7
-
8
- # Example command to install a package (e.g., numpy)
9
- os.system("pip install bitsandbytes-cuda111")
10
- os.system("pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118")
11
-
12
-
13
-
14
-
15
  # Define the repository for the quantized model
16
  repo_name = "cyan2k/molmo-7B-D-bnb-4bit"
17
  arguments = {"device_map": "auto", "torch_dtype": "auto", "trust_remote_code": True}
@@ -28,7 +19,7 @@ def process_image_and_text(image, text):
28
  )
29
 
30
  # Move inputs to the same device as the model (GPU) and make a batch of size 1
31
- inputs = {k: v.unsqueeze(0) for k, v in inputs.items()}
32
 
33
  # Generate output
34
  output = model.generate(
@@ -51,11 +42,11 @@ def chatbot(image, text, history):
51
  # Define the Gradio interface
52
  with gr.Blocks() as demo:
53
  gr.Markdown("# Image Chatbot with Molmo-7B-4 Bit Quantized")
54
-
55
  with gr.Row():
56
  image_input = gr.Image(type="numpy")
57
  chatbot_output = gr.Chatbot()
58
-
59
  text_input = gr.Textbox(placeholder="Ask a question about the image...")
60
  submit_button = gr.Button("Submit")
61
 
@@ -73,5 +64,4 @@ with gr.Blocks() as demo:
73
  outputs=[chatbot_output]
74
  )
75
 
76
- demo.launch()
77
-
 
3
  from PIL import Image
4
  import torch
5
 
 
 
 
 
 
 
 
 
 
6
  # Define the repository for the quantized model
7
  repo_name = "cyan2k/molmo-7B-D-bnb-4bit"
8
  arguments = {"device_map": "auto", "torch_dtype": "auto", "trust_remote_code": True}
 
19
  )
20
 
21
  # Move inputs to the same device as the model (GPU) and make a batch of size 1
22
+ inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}
23
 
24
  # Generate output
25
  output = model.generate(
 
42
  # Define the Gradio interface
43
  with gr.Blocks() as demo:
44
  gr.Markdown("# Image Chatbot with Molmo-7B-4 Bit Quantized")
45
+
46
  with gr.Row():
47
  image_input = gr.Image(type="numpy")
48
  chatbot_output = gr.Chatbot()
49
+
50
  text_input = gr.Textbox(placeholder="Ask a question about the image...")
51
  submit_button = gr.Button("Submit")
52
 
 
64
  outputs=[chatbot_output]
65
  )
66
 
67
+ demo.launch()