Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
from PIL import Image
import numpy as np
import torch as T
import transformers

# Assuming necessary model and tokenizer are already set up
# Path to the local LLaVA v1 7B checkpoint directory.
PATH_LLAVA = '_ckpt/LLaVA-7B-v1'
# NOTE(review): these run at import time — loading the checkpoint and moving
# the model to GPU requires CUDA to be available; importing this module on a
# CPU-only machine will fail at the .cuda() call.
tokenizer = transformers.AutoTokenizer.from_pretrained(PATH_LLAVA)
model = transformers.AutoModelForCausalLM.from_pretrained(PATH_LLAVA).cuda()
|
11 |
+
|
12 |
+
def remove_alter(s): # Simplify expressive instruction
|
13 |
+
if 'ASSISTANT:' in s: s = s[s.index('ASSISTANT:')+10:].strip()
|
14 |
+
if '</s>' in s: s = s[:s.index('</s>')].strip()
|
15 |
+
if 'alternative' in s.lower(): s = s[:s.lower().index('alternative')]
|
16 |
+
if '[IMG0]' in s: s = s[:s.index('[IMG0]')]
|
17 |
+
s = '.'.join([s.strip() for s in s.split('.')[:2]])
|
18 |
+
if s[-1]!='.': s += '.'
|
19 |
+
return s.strip()
|
20 |
+
|
21 |
+
def load_image_and_generate_instruction(image_path):
    """Load an image, display it, and print a simplified instruction.

    Args:
        image_path: Filesystem path to the image to load.

    Returns:
        The simplified ("expressive") instruction string.

    Raises:
        FileNotFoundError / PIL.UnidentifiedImageError: if the path does not
        exist or is not a readable image.
    """
    # BUG FIX: the original leaked the file handle returned by Image.open;
    # the context manager guarantees it is closed.
    with Image.open(image_path) as img:
        img.show()

    # Example: Generate a simple instruction based on the image.
    # This is a placeholder. You would replace this with your own method
    # to analyze the image and generate a textual description or instruction.
    instruction = "Describe what to do with this image."

    # Simplify and generate expressive instruction.
    expressive_instruction = remove_alter(instruction)
    print("Expressive Instruction:", expressive_instruction)
    # Return the result so callers can use it (original implicitly
    # returned None; no existing caller consumed the return value).
    return expressive_instruction
|
34 |
+
|
35 |
+
# Example usage — guarded so that importing this module does not immediately
# open and display an image (original ran this at import time).
if __name__ == "__main__":
    image_path = './path/to/your/image.jpg'  # Update this path to your image
    load_image_and_generate_instruction(image_path)
|