|
|
""" |
|
|
run_libero_eval.py |
|
|
|
|
|
Runs a model in a LIBERO simulation environment. |
|
|
|
|
|
Usage: |
|
|
# OpenVLA: |
|
|
# IMPORTANT: Set `center_crop=True` if model is fine-tuned with augmentations |
|
|
oc slot |
|
|
CUDA_VISIBLE_DEVICES=0 python run_orc_model_sample.py \ |
|
|
--model_family openvla \ |
|
|
--saved_dir output_orc |
|
|
""" |
|
|
|
|
|
import json
import os
import pickle
import random
import re
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Union

import cv2
import draccus
import numpy as np
import torch
import tqdm
from PIL import Image
from peft import PeftModel
from safetensors.torch import load_file
from transformers import AutoConfig, AutoImageProcessor, AutoModelForVision2Seq, AutoProcessor

from prismatic.extern.hf.configuration_prismatic import OpenVLAConfig
from prismatic.extern.hf.modeling_prismatic import (
    OpenVLAForActionPrediction,
    EmbodiedObjectSlot,
    EmbodiedRelationSlot,
    EmbodiedObject_LangSlot,
    EmbodiedRelation_LangSlot,
)
from prismatic.extern.hf.processing_prismatic import PrismaticImageProcessor, PrismaticProcessor

from robot_utils import (
    DATE_TIME,
)

# LIBERO benchmark registry and rollout helpers used in `eval_libero` below.
# NOTE: the helper module path is an assumption; point it at wherever `get_libero_env`,
# `get_libero_image`, and `save_rollout_video` live in this project.
from libero.libero import benchmark
from libero_utils import get_libero_env, get_libero_image, save_rollout_video
|
|
|
|
|
def get_processor(cfg):
    """Get VLA model's Hugging Face processor."""
    processor = AutoProcessor.from_pretrained(os.path.dirname(cfg.custom_param_checkpoint), trust_remote_code=False)
    return processor
|
|
|
|
|
def get_vla(cfg): |
|
|
"""Loads and returns a VLA model from checkpoint.""" |
|
|
|
|
|
print("[*] Instantiating Pretrained VLA model") |
|
|
|
|
|
print("[*] Loading in BF16 with Flash-Attention Enabled") |
|
|
|
|
|
AutoConfig.register("openvla", OpenVLAConfig) |
|
|
AutoImageProcessor.register(OpenVLAConfig, PrismaticImageProcessor) |
|
|
AutoProcessor.register(OpenVLAConfig, PrismaticProcessor) |
|
|
AutoModelForVision2Seq.register(OpenVLAConfig, OpenVLAForActionPrediction) |
|
|
|
|
|
    base_vla = AutoModelForVision2Seq.from_pretrained(
        'output_hf_model_openx',
        attn_implementation="flash_attention_2",
        torch_dtype=torch.bfloat16,
        load_in_8bit=cfg.load_in_8bit,
        load_in_4bit=cfg.load_in_4bit,
        low_cpu_mem_usage=True,
        trust_remote_code=False,
    ).to('cuda')
|
|
print("Loaded base.") |
|
|
|
|
|
base_vla = PeftModel.from_pretrained(base_vla, cfg.pretrained_checkpoint) |
|
|
base_vla = base_vla.merge_and_unload() |
|
|
print("Merged CustomOpenVLA LLM LoRA.") |
|
|
|
|
|
if cfg.slot_type == 'oc': |
|
|
vla = EmbodiedObject_LangSlot(base_model=base_vla, number_of_slots = cfg.number_of_slots) |
|
|
elif cfg.slot_type == 'orc': |
|
|
vla = EmbodiedRelation_LangSlot(base_model=base_vla, number_of_slots = cfg.number_of_slots) |
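    # Load the extra slot-module parameters from the safetensors checkpoint, then freeze everything for inference.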
|
|
|
|
|
weights = load_file(cfg.custom_param_checkpoint) |
|
|
vla.load_state_dict(weights, strict=False) |
|
|
vla.object_centric_tokenizer.requires_grad_(False) |
|
|
vla.object_centric_bbox_head.requires_grad_(False) |
|
|
vla.object_centric_mask_head.requires_grad_(False) |
|
|
    vla.requires_grad_(False)

    # Move the model to GPU unless it was loaded with bitsandbytes quantization,
    # in which case device placement is handled at load time.
    if not cfg.load_in_8bit and not cfg.load_in_4bit:
        vla = vla.to('cuda')
|
|
|
|
|
|
|
|
dataset_statistics_path = os.path.join(os.path.dirname(cfg.custom_param_checkpoint), "dataset_statistics.json") |
|
|
if os.path.isfile(dataset_statistics_path): |
|
|
with open(dataset_statistics_path, "r") as f: |
|
|
norm_stats = json.load(f) |
|
|
vla.norm_stats = norm_stats |
|
|
else: |
|
|
print( |
|
|
"WARNING: No local dataset_statistics.json file found for current checkpoint.\n" |
|
|
"You can ignore this if you are loading the base VLA (i.e. not fine-tuned) checkpoint." |
|
|
"Otherwise, you may run into errors when trying to call `predict_action()` due to an absent `unnorm_key`." |
|
|
) |
|
|
|
|
|
return vla |
|
|
|
|
|
def get_model(cfg, wrap_diffusion_policy_for_droid=False): |
|
|
"""Load model for evaluation.""" |
|
|
if cfg.model_family == "openvla" or cfg.model_family == "customvla" or cfg.model_family == 'objectvla': |
|
|
model = get_vla(cfg) |
|
|
else: |
|
|
raise ValueError("Unexpected `model_family` found in config.") |
|
|
print(f"Loaded model: {type(model)}") |
|
|
return model |
|
|
|
|
|
def visualize_all(visuals):
    """Tile the exo/ego RGB, boxed, depth, and segmentation views into a single debug image."""

    row1 = [
        visuals['exo_rgb'],
        visuals['exo_rgb_boxed'],
        visuals['exo_rgb_boxed_contact'],
        visuals['exo_depth'],
        visuals['exo_seg'],
    ]

    row2 = [
        visuals['ego_rgb'],
        visuals['ego_rgb_boxed'],
        visuals['ego_rgb_boxed_contact'],
        visuals['ego_depth'],
        visuals['ego_seg'],
    ]

    def preprocess_image(image, target_size=(300, 300)):
        # Promote grayscale inputs to 3 channels, convert BGR -> RGB, and resize for tiling.
        if len(image.shape) == 2:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, target_size)
        return image

    row1_concat = np.hstack([preprocess_image(img) for img in row1])
    row2_concat = np.hstack([preprocess_image(img) for img in row2])

    final_image = np.vstack([row1_concat, row2_concat])
    cv2.imshow('data', final_image)
    cv2.waitKey(1)  # Give the OpenCV window a chance to refresh.
|
|
|
|
|
|
|
|
|
|
def get_libero_dummy_action(model_family: str): |
|
|
"""Get dummy/no-op action, used to roll out the simulation while the robot does nothing.""" |
|
|
return [0, 0, 0, 0, 0, 0, -1] |
|
|
|
|
|
@dataclass
class GenerateConfig:
    # Model parameters
    model_family: str = "openvla"
    slot_type: str = "orc"
    number_of_slots: int = 16
    pretrained_checkpoint: Union[str, Path] = "./orc_model"
    custom_param_checkpoint: Union[str, Path] = "./orc_model/relate_object_bboxes_w_mask_actionable_s16h1.safetensors"
    load_in_8bit: bool = False
    load_in_4bit: bool = False
    center_crop: bool = False

    # Task suite / rollout parameters
    task_suite_name: str = "real_data"
    num_steps_wait: int = 20
    num_trials_per_task: int = 20
    unnorm_key: str = ""  # Set at runtime from `task_suite_name`

    # Logging parameters
    run_id_note: Optional[str] = None
    local_log_dir: str = "./experiments/logs"
    saved_dir: str = "libero_goal_01_slots"

    use_wandb: bool = False
    wandb_project: str = "YOUR_WANDB_PROJECT"
    wandb_entity: str = "YOUR_WANDB_ENTITY"

    seed: int = 7

    writing_extra_waits: bool = False
|
|
|
|
|
|
|
|
|
|
|
|
|
def natural_key(s): |
|
|
return [int(text) if text.isdigit() else text for text in re.split(r'(\d+)', s)] |
|
|
|
|
|
IMAGE_RESOLUTION = 256
|
|
|
|
|
def process_inputs(processor, pixel_values, device): |
|
|
image = Image.fromarray(pixel_values) |
|
|
image = image.convert("RGB") |
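    # Build the OpenVLA-style prompt followed by a block of "_" placeholder tokens that stand in for the
    # predicted action chunk (action_dim values per step over the future horizon).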
|
|
|
|
|
|
|
|
action_dim = 7; future_horizon = 5 |
|
|
prompt = f"In: What action should the robot take?\nOut: " |
|
|
placeholder_seg = "_ _ _ _ _ _ _ _" |
|
|
for fidx in range(future_horizon-1): |
|
|
placeholder_seg += " _ _ _ _ _ _ _" |
|
|
prompt = prompt + placeholder_seg + "</s>" |
|
|
|
|
|
|
|
|
inputs = processor(prompt, image).to(device, dtype=torch.float16) |
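    # Overwrite the token just before the placeholder block with Llama token id 29871 (the empty/space piece),
    # presumably to match the tokenization the model saw during training.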
|
|
inputs['input_ids'][0][-2-(action_dim*future_horizon)] = 29871 |
|
|
|
|
|
|
|
|
|
|
|
return inputs |
|
|
|
|
|
def set_seed_everywhere(seed: int): |
|
|
"""Sets the random seed for Python, NumPy, and PyTorch functions.""" |
|
|
torch.manual_seed(seed) |
|
|
torch.cuda.manual_seed_all(seed) |
|
|
np.random.seed(seed) |
|
|
random.seed(seed) |
|
|
torch.backends.cudnn.deterministic = True |
|
|
torch.backends.cudnn.benchmark = False |
|
|
os.environ["PYTHONHASHSEED"] = str(seed) |
|
|
|
|
|
|
|
|
def get_image_resize_size(cfg): |
|
|
""" |
|
|
Gets image resize size for a model class. |
|
|
If `resize_size` is an int, then the resized image will be a square. |
|
|
Else, the image will be a rectangle. |
|
|
""" |
|
|
if cfg.model_family == "openvla" or cfg.model_family == "customvla" or cfg.model_family == 'objectvla': |
|
|
resize_size = 224 |
|
|
else: |
|
|
raise ValueError("Unexpected `model_family` found in config.") |
|
|
return resize_size |
|
|
|
|
|
|
|
|
def get_current_slots(vla, batch, task_texts, in_past_tokens, device_id='cuda'): |
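    """Encode the current frame(s) into slot tokens conditioned on the task text."""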
|
|
pixel_values = batch["pixel_values"].to(torch.bfloat16).to(device_id) |
|
|
outputs = vla.get_obj_slots(pixel_values, task_texts, in_past_tokens) |
|
|
return outputs |
|
|
|
|
|
def get_normalized_actions(vla, batch, slot_outputs, device_id): |
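    """Decode normalized continuous actions from slot outputs; returns the first step of the predicted chunk."""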
|
|
|
|
|
patch_features = slot_outputs['patch_features'] |
|
|
slotted_features = slot_outputs['visual_tokens'] |
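    # When interactability scores are available, keep only the top-k (k=4) most interactable slots.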
|
|
if 'interactable_features' in slot_outputs: |
|
|
slotted_features, _ = vla.select_top_k_slots( |
|
|
slotted_features, slot_outputs['interactable_features'], k=4) |
|
|
clip_embeddings = slot_outputs['texts'] |
|
|
clip_attention_mask = torch.logical_not(slot_outputs['texts_attn']) |
|
|
llama_input_ids = batch["input_ids"].to(device_id) |
|
|
llama_attention_mask = batch["attention_mask"].to(device_id) |
|
|
|
|
|
continuous_actions_pred = vla.decode_continuous_actions( |
|
|
patch_features=patch_features, |
|
|
slotted_features=slotted_features, |
|
|
clip_embeddings=clip_embeddings, |
|
|
clip_attention_mask=clip_attention_mask, |
|
|
llama_input_ids=llama_input_ids, |
|
|
llama_attention_mask=llama_attention_mask, |
|
|
llama_labels=None |
|
|
) |
|
|
action_chunk, action_dim = continuous_actions_pred.shape[1:] |
|
|
|
|
|
return continuous_actions_pred[:,0,:] |
|
|
|
|
|
def denormalize_actions(model, unnorm_key, normalized_actions): |
|
|
|
|
|
action_norm_stats = model.get_action_stats(unnorm_key) |
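    # Map actions from the normalized [-1, 1] range back to the dataset's [q01, q99] range,
    # leaving dimensions excluded by the mask untouched.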
|
|
mask = action_norm_stats.get("mask", np.ones_like(action_norm_stats["q01"], dtype=bool)) |
|
|
action_high, action_low = np.array(action_norm_stats["q99"]), np.array(action_norm_stats["q01"]) |
|
|
actions = np.where( |
|
|
mask, |
|
|
0.5 * (normalized_actions + 1) * (action_high - action_low) + action_low, |
|
|
normalized_actions, |
|
|
) |
|
|
return actions |
|
|
|
|
|
def normalize_gripper_action(action, binarize=True): |
|
|
""" |
|
|
Changes gripper action (last dimension of action vector) from [0,1] to [-1,+1]. |
|
|
Necessary for some environments (not Bridge) because the dataset wrapper standardizes gripper actions to [0,1]. |
|
|
Note that unlike the other action dimensions, the gripper action is not normalized to [-1,+1] by default by |
|
|
the dataset wrapper. |
|
|
|
|
|
Normalization formula: y = 2 * (x - orig_low) / (orig_high - orig_low) - 1 |
|
|
""" |
|
|
|
|
|
orig_low, orig_high = 0.0, 1.0 |
|
|
action[..., -1] = 2 * (action[..., -1] - orig_low) / (orig_high - orig_low) - 1 |
|
|
|
|
|
if binarize: |
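        # Snap the gripper command to exactly -1 or +1.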
|
|
|
|
|
action[..., -1] = np.sign(action[..., -1]) |
|
|
|
|
|
return action |
|
|
|
|
|
|
|
|
|
|
|
@draccus.wrap() |
|
|
def eval_libero(cfg: GenerateConfig) -> None: |
|
|
assert cfg.pretrained_checkpoint is not None, "cfg.pretrained_checkpoint must not be None!" |
|
|
|
|
|
|
|
|
assert not (cfg.load_in_8bit and cfg.load_in_4bit), "Cannot use both 8-bit and 4-bit quantization!" |
|
|
|
|
|
|
|
|
set_seed_everywhere(cfg.seed) |
|
|
|
|
|
|
|
|
cfg.unnorm_key = cfg.task_suite_name |
|
|
|
|
|
|
|
|
model = get_model(cfg) |
|
|
|
|
|
|
|
|
if cfg.model_family == "openvla": |
|
|
|
|
|
|
|
|
if cfg.unnorm_key not in model.norm_stats and f"{cfg.unnorm_key}_no_noops" in model.norm_stats: |
|
|
cfg.unnorm_key = f"{cfg.unnorm_key}_no_noops" |
|
|
assert cfg.unnorm_key in model.norm_stats, f"Action un-norm key {cfg.unnorm_key} not found in VLA `norm_stats`!" |
|
|
|
|
|
|
|
|
processor = None |
|
|
if cfg.model_family == "openvla": |
|
|
processor = get_processor(cfg) |
|
|
|
|
|
|
|
|
run_id = f"EVAL-{cfg.task_suite_name}-{cfg.model_family}-{DATE_TIME}" |
|
|
if cfg.run_id_note is not None: |
|
|
run_id += f"--{cfg.run_id_note}" |
|
|
os.makedirs(cfg.local_log_dir, exist_ok=True) |
|
|
local_log_filepath = os.path.join(cfg.local_log_dir, run_id + ".txt") |
|
|
log_file = open(local_log_filepath, "w") |
|
|
print(f"Logging to local log file: {local_log_filepath}") |
|
|
|
|
|
|
|
|
benchmark_dict = benchmark.get_benchmark_dict() |
|
|
task_suite = benchmark_dict[cfg.task_suite_name]() |
|
|
num_tasks_in_suite = task_suite.n_tasks |
|
|
print(f"Task suite: {cfg.task_suite_name}") |
|
|
log_file.write(f"Task suite: {cfg.task_suite_name}\n") |
|
|
|
|
|
|
|
|
resize_size = get_image_resize_size(cfg) |
|
|
|
|
|
|
|
|
total_episodes, total_successes = 0, 0 |
|
|
extra_waits_dict = {} |
|
|
for task_id in tqdm.tqdm(range(num_tasks_in_suite)): |
|
|
|
|
|
task = task_suite.get_task(task_id) |
|
|
|
|
|
|
|
|
initial_states = task_suite.get_task_init_states(task_id) |
|
|
|
|
|
|
|
|
env, task_description = get_libero_env(task, cfg.model_family, resolution=256) |
|
|
|
|
|
|
|
|
task_episodes, task_successes = 0, 0 |
|
|
extra_waits = [] |
|
|
for episode_idx in tqdm.tqdm(range(cfg.num_trials_per_task)): |
|
|
print(f"\nTask: {task_description}") |
|
|
log_file.write(f"\nTask: {task_description}\n") |
|
|
|
|
|
|
|
|
env.reset() |
|
|
|
|
|
|
|
|
obs = env.set_init_state(initial_states[episode_idx]) |
|
|
|
|
|
|
|
|
            t = 0
            replay_images = []

            # Per-suite episode length budget (in environment steps).
            if cfg.task_suite_name == "libero_spatial":
                max_steps = 220
            elif cfg.task_suite_name == "libero_object":
                max_steps = 280
            elif cfg.task_suite_name == "libero_goal":
                max_steps = 300
            elif cfg.task_suite_name == "libero_10":
                max_steps = 520
            elif cfg.task_suite_name == "libero_90":
                max_steps = 400
            else:
                # Default budget for suites not listed above (e.g. "real_data"); adjust as needed.
                max_steps = 300
|
|
while t < cfg.num_steps_wait: |
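                # Step the simulator with no-op actions so objects settle before the policy acts.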
|
|
|
|
|
|
|
|
|
|
|
obs, reward, done, info = env.step(get_libero_dummy_action(cfg.model_family)) |
|
|
t += 1 |
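            # Warm-up inference on the first post-settle observation; its action is not executed
            # (the control loop below recomputes actions at every step).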
|
|
|
|
|
|
|
|
task_texts = [task_description] |
|
|
texts = ['robot ' + task_description] |
|
|
img = get_libero_image(obs, resize_size) |
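            # Flip the frame along the width axis (assumed to match the image orientation used at training time).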
|
|
img = img[:,::-1,:] |
|
|
batch_data = process_inputs(processor=processor, pixel_values=img, device='cuda') |
|
|
|
|
|
horizon = 1 |
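            # Add a temporal axis so pixel_values has shape (batch, horizon, C, H, W) for the slot encoder.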
|
|
batch_data['pixel_values'] = batch_data['pixel_values'].unsqueeze(1).repeat(1,horizon,1,1,1) |
|
|
|
|
|
|
|
|
with torch.autocast("cuda", dtype=torch.bfloat16): |
|
|
slot_outputs = get_current_slots(model, batch_data, texts, in_past_tokens=None, device_id='cuda') |
|
|
|
|
|
|
|
|
with torch.autocast("cuda", dtype=torch.bfloat16): |
|
|
actions = get_normalized_actions(model, batch_data, slot_outputs, device_id='cuda') |
|
|
actions = actions.detach().float().cpu().numpy() |
|
|
unnormalized_actions = denormalize_actions(model, cfg.unnorm_key, actions) |
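            # Control loop: query the model on each new observation and execute the first action of the
            # predicted chunk until the episode succeeds or the step budget runs out.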
|
|
|
|
|
|
|
|
while t < max_steps + cfg.num_steps_wait: |
|
|
|
|
|
img = get_libero_image(obs, resize_size) |
|
|
img = img[:,::-1,:] |
|
|
batch_data = process_inputs(processor=processor, pixel_values=img, device='cuda') |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
replay_images.append(img) |
|
|
|
|
|
|
|
|
batch_data['pixel_values'] = batch_data['pixel_values'].unsqueeze(1) |
|
|
|
|
|
with torch.autocast("cuda", dtype=torch.bfloat16): |
|
|
slot_outputs = get_current_slots(model, batch_data, texts, in_past_tokens=None, device_id='cuda') |
|
|
|
|
|
|
|
|
with torch.autocast("cuda", dtype=torch.bfloat16): |
|
|
actions = get_normalized_actions(model, batch_data, slot_outputs, device_id='cuda') |
|
|
actions = actions.detach().float().cpu().numpy() |
|
|
unnormalized_actions = denormalize_actions(model, cfg.unnorm_key, actions) |
|
|
action = unnormalized_actions[0] |
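                # Remap the gripper dimension from [0, 1] to [-1, +1] (and binarize) for the simulator.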
|
|
|
|
|
|
|
|
action = normalize_gripper_action(action, binarize=True) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
obs, reward, done, info = env.step(action.tolist()) |
|
|
t += 1 |
|
|
if done: |
|
|
task_successes += 1 |
|
|
total_successes += 1 |
|
|
break |
|
|
|
|
|
task_episodes += 1 |
|
|
total_episodes += 1 |
|
|
|
|
|
|
|
|
cv2.destroyAllWindows() |
|
|
save_rollout_video( |
|
|
replay_images, total_episodes, success=done, task_description=task_description, log_file=log_file, saved_dir=cfg.saved_dir |
|
|
) |
|
|
|
|
|
print(f"Success: {done}") |
|
|
print(f"# episodes completed so far: {total_episodes}") |
|
|
print(f"# successes: {total_successes} ({total_successes / total_episodes * 100:.1f}%)") |
|
|
log_file.write(f"Success: {done}\n") |
|
|
log_file.write(f"# episodes completed so far: {total_episodes}\n") |
|
|
log_file.write(f"# successes: {total_successes} ({total_successes / total_episodes * 100:.1f}%)\n") |
|
|
log_file.flush() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print(f"Current task success rate: {float(task_successes) / float(task_episodes)}") |
|
|
print(f"Current total success rate: {float(total_successes) / float(total_episodes)}") |
|
|
log_file.write(f"Current task success rate: {float(task_successes) / float(task_episodes)}\n") |
|
|
log_file.write(f"Current total success rate: {float(total_successes) / float(total_episodes)}\n") |
|
|
log_file.flush() |
|
|
|
|
|
|
|
|
log_file.close() |
|
|
|
|
|
if __name__ == "__main__": |
|
|
eval_libero() |
|
|
|