from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
from datasets import load_dataset
import torch
import json
import re
import os
|
|
# Stream the benchmark split so examples are fetched lazily rather than downloaded up front.
dataset = load_dataset('moondream/analog-clock-benchmark', split='test', streaming=True)
|
|
result_location = './molmo-answers.json'

# Molmo-72B-0924 is published under the allenai org on the Hugging Face Hub.
model_path = 'allenai/Molmo-72B-0924'

processor = AutoProcessor.from_pretrained(
    model_path,
    trust_remote_code=True,
    torch_dtype='auto',
    device_map='auto'
)

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    torch_dtype='auto',
    device_map='auto'
)
|
|
def extract_points_and_text(input_string):
    # Molmo emits coordinates as attributes like x="32.5" or x1="10.2";
    # capture the optional digit suffix and the numeric value.
    points_pattern = r'(?:x|y)(\d*)=["\']?([\d.]+)["\']?'
    matches = re.findall(points_pattern, input_string)

    # Group coordinates by suffix so x1/y1, x2/y2 pair up correctly.
    point_dict = {}
    for index, value in matches:
        point_dict.setdefault(index, []).append(float(value))

    # Keep only complete (x, y) pairs.
    points = [point for point in point_dict.values() if len(point) == 2]

    # Pull the label text out of the <point>/<points> tag, then replace the
    # whole tag with it. A lambda replacement keeps re.sub from treating
    # backslashes in the label as escape sequences.
    text_pattern = r'<(?:point|points)[^>]*>(.*?)</(?:point|points)>'
    text_match = re.search(text_pattern, input_string)
    text = text_match.group(1) if text_match else ""
    cleaned_string = re.sub(text_pattern, lambda _: text, input_string)

    # Any bare integers left in the cleaned string are candidate answers.
    answers = [int(num) for num in re.findall(r'\b\d+\b', cleaned_string)]

    result = {
        "points": points,
        "cleaned_string": cleaned_string,
        "answers": answers
    }

    return result
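
# Illustrative check (hand-written sample string, not real model output):
#   extract_points_and_text('<point x="32.5" y="54.0" alt="hour">hour hand</point> at 7')
# -> {'points': [[32.5, 54.0]], 'cleaned_string': 'hour hand at 7', 'answers': [7]}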
|
|
def do_inference(image, text):
    inputs = processor.process(
        images=[image],
        text=text
    )

    # Move tensors to the model's device and add a batch dimension.
    inputs = {k: v.to(model.device).unsqueeze(0) for k, v in inputs.items()}

    # Cast the image tensor to float16 for lower-memory inference.
    inputs["images"] = inputs["images"].to(torch.float16)

    output = model.generate_from_batch(
        inputs,
        GenerationConfig(max_new_tokens=200, stop_strings="<|endoftext|>", attention_type='flash'),
        tokenizer=processor.tokenizer
    )

    # Decode only the newly generated tokens, skipping the prompt.
    generated_tokens = output[0, inputs['input_ids'].size(1):]
    generated_text = processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)

    return generated_text
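
# Example usage (illustrative; 'clock.jpg' is a placeholder path):
#   from PIL import Image
#   print(do_inference(Image.open('clock.jpg'), 'What time does this clock show?'))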
|
|
# Seed the results file with an empty JSON list on first run.
if not os.path.exists(result_location):
    os.makedirs(os.path.dirname(result_location), exist_ok=True)
    with open(result_location, 'w') as file:
        json.dump([], file)
|
|
def has_one_clock(image):
    molmo_res_text = do_inference(image, 'How many clocks are in this image?')
    print('Clock count -> ', molmo_res_text)

    # Molmo's counting replies start with "Counting the" and end with the
    # count; treat a reply ending in "1." as exactly one clock.
    stripped = molmo_res_text.strip()
    return stripped.startswith("Counting the") and stripped.endswith("1.")
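
# Illustrative: a reply like " Counting the clocks, I see 1." returns True,
# while " Counting the clocks, I see 2." returns False.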
|
|
def get_time(image):
    return do_inference(image, '''Look at this image with a readable clock and do the following:
1. Report the hour hand location
2. Report the minute hand location
3. Report the time based on these locations''')
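
# Illustrative: on a reply like "... so the time is 3:45.", the time regex
# used in the loop below (r'\b\d{1,2}:\d{2}\b') extracts ['3:45'].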
|
|
for datapoint_index, datapoint in enumerate(dataset):
    molmo_time_string = get_time(datapoint['image'])
    print(molmo_time_string)
    print('\n\n\n\n\n')

    # Keep replies containing exactly one H:MM / HH:MM reading. "10:10" is
    # excluded because it is the cliché watch-ad time that models tend to
    # guess by default.
    time_pattern = r'\b\d{1,2}:\d{2}\b'
    times_found = re.findall(time_pattern, molmo_time_string)

    if len(times_found) == 1 and times_found[0] != "10:10":
        obj_to_save = {
            'datapoint_index': datapoint_index,
            'reported_times': times_found,
            'llm_string': molmo_time_string
        }
        # Read-modify-write: load the saved answers, append the new one,
        # rewind, rewrite, and truncate any leftover bytes.
        with open(result_location, 'r+') as file:
            data = json.load(file)
            data.append(obj_to_save)
            file.seek(0)
            json.dump(data, file, indent=4)
            file.truncate()