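# Dora operator for a small VLM-driven fetch demo: an AWQ-quantised Idefics2-8B
# model reads a note held up to the camera, the robot is sent towards the kitchen
# when a drink is requested, and it drives back once it sees a raised hand.
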
from dora import DoraStatus
import pyarrow as pa
from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
import torch
import time
import awq_ext  # explicit import so missing AWQ CUDA kernels fail fast at startup
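
# Incoming frames arrive as flat arrays and are reshaped to this resolution.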
CAMERA_WIDTH = 960
CAMERA_HEIGHT = 540
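
# Processor for the locally stored AWQ-quantised Idefics2-8B checkpoint; the image
# placeholder tokens below must never be generated as part of an answer.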
PROCESSOR = AutoProcessor.from_pretrained("/home/peiji/idefics2-8b-AWQ")
BAD_WORDS_IDS = PROCESSOR.tokenizer(
["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids
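# Token ids that can terminate an answer: <end_of_utterance> plus the tokenizer's EOS.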
EOS_WORDS_IDS = PROCESSOR.tokenizer(
"<end_of_utterance>", add_special_tokens=False
).input_ids + [PROCESSOR.tokenizer.eos_token_id]
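
# 4-bit AWQ checkpoint with fused attention/MLP/layernorm modules for faster
# decoding; the shapes below correspond to Idefics2's Mistral-7B text backbone.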
model = AutoModelForVision2Seq.from_pretrained(
"/home/peiji/idefics2-8b-AWQ",
quantization_config=AwqConfig(
bits=4,
fuse_max_seq_len=4096,
modules_to_fuse={
"attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
"mlp": ["gate_proj", "up_proj", "down_proj"],
"layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
"use_alibi": False,
"num_attention_heads": 32,
"num_key_value_heads": 8,
"hidden_size": 4096,
},
),
trust_remote_code=True,
).to("cuda")
def reset_awq_cache(model):
"""
Simple method to reset the AWQ fused modules cache
"""
from awq.modules.fused.attn import QuantAttentionFused
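    # Each fused attention block keeps its KV-cache position in `start_pos`;
    # rewinding it to 0 discards the previously cached context.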
for name, module in model.named_modules():
if isinstance(module, QuantAttentionFused):
module.start_pos = 0
def ask_vlm(image, instruction):
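    """Ask the VLM one question about the given frame and return its reply."""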
global model
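    # Chat-style prompt with the image interleaved between the user text and the
    # assistant turn, following the Idefics2 prompt format.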
prompts = [
"User:",
image,
f"{instruction}.<end_of_utterance>\n",
"Assistant:",
]
inputs = {k: torch.tensor(v).to("cuda") for k, v in PROCESSOR(prompts).items()}
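    # Keep replies short (they are spoken aloud downstream) and ban the image
    # placeholder tokens from the generated text.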
generated_ids = model.generate(
**inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=25, repetition_penalty=1.2
)
generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
reset_awq_cache(model)
return generated_texts[0].split("\nAssistant: ")[1]
class Operator:
def __init__(self):
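        # Two-stage loop: "person" = read the note someone is holding up,
        # "coffee" = wait in the kitchen until somebody raises a hand.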
self.state = "person"
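        # Whether the latest answer has already been spoken, so it is not
        # repeated on every frame.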
self.last_output = False
def on_event(
self,
dora_event,
send_output,
) -> DoraStatus:
if dora_event["type"] == "INPUT":
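            # Rebuild the RGB frame from the flattened array carried by the event.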
image = (
dora_event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
)
if self.state == "person":
output = ask_vlm(image, "Can you read the note?").lower()
print(output, flush=True)
if "coffee" in output or "tea" in output or "water" in output:
send_output(
"control",
pa.array([-3.0, 0.0, 0.0, 0.8, 0.0, 10.0, 180.0]),
)
send_output(
"speak",
pa.array([output + ". Going to the kitchen."]),
)
time.sleep(10)
self.state = "coffee"
self.last_output = False
elif not self.last_output:
self.last_output = True
send_output(
"speak",
pa.array([output]),
)
time.sleep(4)
elif self.state == "coffee":
output = ask_vlm(image, "Is there a person with a hands up?").lower()
print(output, flush=True)
if "yes" in output:
send_output(
"speak",
pa.array([output + ". Going to the office."]),
)
send_output(
"control",
pa.array([2.0, 0.0, 0.0, 0.8, 0.0, 10.0, 0.0]),
)
time.sleep(10)
self.state = "person"
self.last_output = False
elif not self.last_output:
self.last_output = True
send_output(
"speak",
pa.array([output]),
)
time.sleep(4)
return DoraStatus.CONTINUE