"""
run_libero_eval.py
Runs a model in a LIBERO simulation environment.
Usage:
# OpenVLA:
# IMPORTANT: Set `center_crop=True` if model is fine-tuned with augmentations
oc slot
CUDA_VISIBLE_DEVICES=0 python run_orc_model_sample.py \
--model_family openvla \
--saved_dir output_orc
"""
import os
import pickle
import random
import re
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Union

import cv2
import draccus
import numpy as np
import torch
import tqdm
from PIL import Image
from transformers import AutoConfig, AutoImageProcessor, AutoModelForVision2Seq, AutoProcessor
# import wandb
# Local helpers (LIBERO env setup, image preprocessing, rollout video saving)
from libero_utils import (
    get_libero_env,
    get_libero_image,
    save_rollout_video,
)
from robot_utils import (
DATE_TIME,
)
from prismatic.extern.hf.configuration_prismatic import OpenVLAConfig
from prismatic.extern.hf.modeling_prismatic import OpenVLAForActionPrediction, EmbodiedObjectSlot, EmbodiedRelationSlot, EmbodiedObject_LangSlot, EmbodiedRelation_LangSlot
from prismatic.extern.hf.processing_prismatic import PrismaticImageProcessor, PrismaticProcessor
from peft import PeftModel
from safetensors.torch import load_file
import json
def get_processor(cfg):
"""Get VLA model's Hugging Face processor."""
processor = AutoProcessor.from_pretrained(os.path.dirname(cfg.custom_param_checkpoint), trust_remote_code=False)
return processor
def get_vla(cfg):
"""Loads and returns a VLA model from checkpoint."""
# Load VLA checkpoint.
print("[*] Instantiating Pretrained VLA model")
# print("[*] Loading in F16 with Flash-Attention Enabled")
print("[*] Loading in BF16 with Flash-Attention Enabled")
AutoConfig.register("openvla", OpenVLAConfig)
AutoImageProcessor.register(OpenVLAConfig, PrismaticImageProcessor)
AutoProcessor.register(OpenVLAConfig, PrismaticProcessor)
AutoModelForVision2Seq.register(OpenVLAConfig, OpenVLAForActionPrediction)
    base_vla = AutoModelForVision2Seq.from_pretrained(
        'output_hf_model_openx',  # base VLA checkpoint (the LoRA adapter is merged on top below)
attn_implementation="flash_attention_2",
torch_dtype=torch.bfloat16,
# torch_dtype=torch.float16,
load_in_8bit=cfg.load_in_8bit,
load_in_4bit=cfg.load_in_4bit,
low_cpu_mem_usage=True,
trust_remote_code=False,
).to('cuda')
print("Loaded base.")
base_vla = PeftModel.from_pretrained(base_vla, cfg.pretrained_checkpoint)
base_vla = base_vla.merge_and_unload()
print("Merged CustomOpenVLA LLM LoRA.")
    if cfg.slot_type == 'oc':
        vla = EmbodiedObject_LangSlot(base_model=base_vla, number_of_slots=cfg.number_of_slots)
    elif cfg.slot_type == 'orc':
        vla = EmbodiedRelation_LangSlot(base_model=base_vla, number_of_slots=cfg.number_of_slots)
    else:
        raise ValueError(f"Unexpected `slot_type` found in config: {cfg.slot_type}")
weights = load_file(cfg.custom_param_checkpoint)
vla.load_state_dict(weights, strict=False)
vla.object_centric_tokenizer.requires_grad_(False)
vla.object_centric_bbox_head.requires_grad_(False)
vla.object_centric_mask_head.requires_grad_(False)
    vla.requires_grad_(False)
    # Move model to device.
    # Note: `.to()` is not supported for 8-bit or 4-bit bitsandbytes models, but the model will
    # already be set to the right device and cast to the correct dtype upon loading.
    if not cfg.load_in_8bit and not cfg.load_in_4bit:
        vla = vla.to('cuda')
# Load dataset stats used during finetuning (for action un-normalization).
dataset_statistics_path = os.path.join(os.path.dirname(cfg.custom_param_checkpoint), "dataset_statistics.json")
if os.path.isfile(dataset_statistics_path):
with open(dataset_statistics_path, "r") as f:
norm_stats = json.load(f)
vla.norm_stats = norm_stats
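        # Sketch of the expected layout of dataset_statistics.json (an assumption based on how
        # `denormalize_actions` uses `get_action_stats(unnorm_key)` below; real files may carry
        # extra fields):
        #
        #   { "<unnorm_key>": { "action": { "q01": [...], "q99": [...], "mask": [...] } } }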
else:
        print(
            "WARNING: No local dataset_statistics.json file found for current checkpoint.\n"
            "You can ignore this if you are loading the base VLA (i.e. not fine-tuned) checkpoint.\n"
            "Otherwise, you may run into errors when trying to call `predict_action()` due to an absent `unnorm_key`."
        )
return vla
def get_model(cfg, wrap_diffusion_policy_for_droid=False):
"""Load model for evaluation."""
    if cfg.model_family in ("openvla", "customvla", "objectvla"):
model = get_vla(cfg)
else:
raise ValueError("Unexpected `model_family` found in config.")
print(f"Loaded model: {type(model)}")
return model
def visualize_all(visuals):
# Get the corresponding visualizations
    row1 = [
        visuals['exo_rgb'],                # exo rgb
        visuals['exo_rgb_boxed'],          # exo rgb (boxed)
        visuals['exo_rgb_boxed_contact'],  # exo rgb (boxed + contact)
        visuals['exo_depth'],              # exo depth
        visuals['exo_seg'],                # exo seg
    ]
    row2 = [
        visuals['ego_rgb'],                # ego rgb
        visuals['ego_rgb_boxed'],          # ego rgb (boxed)
        visuals['ego_rgb_boxed_contact'],  # ego rgb (boxed + contact)
        visuals['ego_depth'],              # ego depth
        visuals['ego_seg'],                # ego seg
    ]
# Function to preprocess images (convert grayscale to RGB and resize)
def preprocess_image(image, target_size=(300, 300)):
if len(image.shape) == 2: # Convert grayscale to RGB
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, target_size)
return image
# Process all images to uniform size and color format and concatenate horizontally
row1_concat = np.hstack([preprocess_image(img) for img in row1])
row2_concat = np.hstack([preprocess_image(img) for img in row2])
    # Concatenate vertically to form a 2x5 grid
    final_image = np.vstack([row1_concat, row2_concat])
    cv2.imshow('data', final_image)
    cv2.waitKey(1)  # needed for the OpenCV window to actually render
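# Usage sketch for `visualize_all` (hypothetical data; the real `visuals` dict comes from the
# robot/camera pipeline): every entry may be HxW (grayscale) or HxWx3, and is resized to 300x300.
#
#   dummy = {k: np.zeros((256, 256, 3), dtype=np.uint8) for k in [
#       'exo_rgb', 'exo_rgb_boxed', 'exo_rgb_boxed_contact', 'exo_depth', 'exo_seg',
#       'ego_rgb', 'ego_rgb_boxed', 'ego_rgb_boxed_contact', 'ego_depth', 'ego_seg']}
#   visualize_all(dummy)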
def get_libero_dummy_action(model_family: str):
"""Get dummy/no-op action, used to roll out the simulation while the robot does nothing."""
return [0, 0, 0, 0, 0, 0, -1]
@dataclass
class GenerateConfig:
# fmt: off
#################################################################################################################
# Model-specific parameters
#################################################################################################################
    model_family: str = "openvla"                    # Model family
    slot_type: str = "orc"                           # Slot type: 'oc' (object slots) or 'orc' (object-relation slots)
    number_of_slots: int = 16                        # Number of slots in the model
    pretrained_checkpoint: Union[str, Path] = "./orc_model"        # Path to the LoRA adapter checkpoint merged into the base VLA
    custom_param_checkpoint: Union[str, Path] = "./orc_model/relate_object_bboxes_w_mask_actionable_s16h1.safetensors"  # Path to the slot/action-head weights (safetensors)
load_in_8bit: bool = False # (For OpenVLA only) Load with 8-bit quantization
load_in_4bit: bool = False # (For OpenVLA only) Load with 4-bit quantization
center_crop: bool = False # Center crop? (if trained w/ random crop image aug)
#################################################################################################################
# LIBERO environment-specific parameters
#################################################################################################################
    task_suite_name: str = "real_data"               # Task suite / dataset name (also used as the action un-normalization key). LIBERO options: libero_spatial, libero_object, libero_goal, libero_10, libero_90
num_steps_wait: int = 20 # Number of steps to wait for objects to stabilize in sim
num_trials_per_task: int = 20 # Number of rollouts per task
#################################################################################################################
# Utils
#################################################################################################################
run_id_note: Optional[str] = None # Extra note to add in run ID for logging
local_log_dir: str = "./experiments/logs" # Local directory for eval logs
    saved_dir: str = "libero_goal_01_slots"          # Directory where rollout videos are saved
use_wandb: bool = False # Whether to also log results in Weights & Biases
wandb_project: str = "YOUR_WANDB_PROJECT" # Name of W&B project to log to (use default!)
wandb_entity: str = "YOUR_WANDB_ENTITY" # Name of entity to log under
seed: int = 7 # Random Seed (for reproducibility)
writing_extra_waits: bool = False
# fmt: on
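# With `draccus.wrap()` (used on `eval_libero` below), each field of GenerateConfig can be
# overridden from the command line, e.g. (sketch):
#
#   python run_orc_model_sample.py --slot_type orc --number_of_slots 16 --saved_dir output_orc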
def natural_key(s):
return [int(text) if text.isdigit() else text for text in re.split(r'(\d+)', s)]
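# Example: sorted(["img_2", "img_10"], key=natural_key) -> ["img_2", "img_10"]
# (plain lexicographic sort would give ["img_10", "img_2"]).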
IMAGE_RESOLUTION = 256
def process_inputs(processor, pixel_values, device):
image = Image.fromarray(pixel_values)
image = image.convert("RGB")
# Build VLA prompt
    action_dim = 7
    future_horizon = 5
    prompt = "In: What action should the robot take?\nOut: "
    # Placeholder tokens that reserve space in the prompt for the predicted action chunk
    # (action_dim values per future step).
    placeholder_seg = "_ _ _ _ _ _ _ _"
    for _ in range(future_horizon - 1):
        placeholder_seg += " _ _ _ _ _ _ _"
    prompt = prompt + placeholder_seg + "</s>"
    # Process inputs.
    inputs = processor(prompt, image).to(device, dtype=torch.float16)
    # Token id 29871 is the Llama tokenizer's blank "▁" token, which OpenVLA places before the
    # predicted action tokens; overwrite the corresponding prompt position here.
    inputs['input_ids'][0][-2 - (action_dim * future_horizon)] = 29871
return inputs
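# Usage sketch (hypothetical frame; the real one comes from the environment/camera):
#
#   frame = np.zeros((256, 256, 3), dtype=np.uint8)
#   batch = process_inputs(processor, frame, device='cuda')
#   # `batch` carries 'input_ids', 'attention_mask', and 'pixel_values'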
def set_seed_everywhere(seed: int):
"""Sets the random seed for Python, NumPy, and PyTorch functions."""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ["PYTHONHASHSEED"] = str(seed)
def get_image_resize_size(cfg):
"""
Gets image resize size for a model class.
If `resize_size` is an int, then the resized image will be a square.
Else, the image will be a rectangle.
"""
    if cfg.model_family in ("openvla", "customvla", "objectvla"):
resize_size = 224
else:
raise ValueError("Unexpected `model_family` found in config.")
return resize_size
def get_current_slots(vla, batch, task_texts, in_past_tokens, device_id='cuda'):
pixel_values = batch["pixel_values"].to(torch.bfloat16).to(device_id)
outputs = vla.get_obj_slots(pixel_values, task_texts, in_past_tokens)
return outputs
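# Based on how `get_normalized_actions` consumes it below, `slot_outputs` is expected to hold at
# least 'patch_features', 'visual_tokens', 'texts', and 'texts_attn', plus an optional
# 'interactable_features' entry used to pick the top-k most interactable slots.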
def get_normalized_actions(vla, batch, slot_outputs, device_id):
# get object-centric dynamics
patch_features = slot_outputs['patch_features']
slotted_features = slot_outputs['visual_tokens']
if 'interactable_features' in slot_outputs:
slotted_features, _ = vla.select_top_k_slots(
slotted_features, slot_outputs['interactable_features'], k=4)
clip_embeddings = slot_outputs['texts']
clip_attention_mask = torch.logical_not(slot_outputs['texts_attn'])
llama_input_ids = batch["input_ids"].to(device_id)
llama_attention_mask = batch["attention_mask"].to(device_id)
continuous_actions_pred = vla.decode_continuous_actions(
patch_features=patch_features,
slotted_features=slotted_features,
clip_embeddings=clip_embeddings,
clip_attention_mask=clip_attention_mask,
llama_input_ids=llama_input_ids,
llama_attention_mask=llama_attention_mask,
llama_labels=None
) # will return output of [cross-modality-slots, cross-modality-bboxes]
    action_chunk, action_dim = continuous_actions_pred.shape[1:]
    # Return only the first step of the predicted action chunk.
    return continuous_actions_pred[:, 0, :]
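# Shape sketch (inferred from the indexing above): `continuous_actions_pred` is
# [batch, action_chunk, action_dim]; with the defaults in `process_inputs` that is [1, 5, 7].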
def denormalize_actions(model, unnorm_key, normalized_actions):
# Unnormalize actions
action_norm_stats = model.get_action_stats(unnorm_key)
mask = action_norm_stats.get("mask", np.ones_like(action_norm_stats["q01"], dtype=bool))
action_high, action_low = np.array(action_norm_stats["q99"]), np.array(action_norm_stats["q01"])
actions = np.where(
mask,
0.5 * (normalized_actions + 1) * (action_high - action_low) + action_low,
normalized_actions,
)
return actions
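# Worked example of the un-normalization above (hypothetical stats): for a masked dimension with
# q01 = -1.0 and q99 = 1.0, a normalized value of 0.5 maps to
#   0.5 * (0.5 + 1) * (1.0 - (-1.0)) + (-1.0) = 0.5,
# while unmasked dimensions (typically the gripper) are passed through unchanged.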
def normalize_gripper_action(action, binarize=True):
"""
Changes gripper action (last dimension of action vector) from [0,1] to [-1,+1].
Necessary for some environments (not Bridge) because the dataset wrapper standardizes gripper actions to [0,1].
Note that unlike the other action dimensions, the gripper action is not normalized to [-1,+1] by default by
the dataset wrapper.
Normalization formula: y = 2 * (x - orig_low) / (orig_high - orig_low) - 1
"""
# Just normalize the last action to [-1,+1].
orig_low, orig_high = 0.0, 1.0
action[..., -1] = 2 * (action[..., -1] - orig_low) / (orig_high - orig_low) - 1
if binarize:
# Binarize to -1 or +1.
action[..., -1] = np.sign(action[..., -1])
return action
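# Worked example (hypothetical values): if the gripper entry is 0.8, it maps to
#   2 * (0.8 - 0.0) / (1.0 - 0.0) - 1 = 0.6,
# and with binarize=True it is snapped to np.sign(0.6) = +1.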
from libero.libero import benchmark
@draccus.wrap()
def eval_libero(cfg: GenerateConfig) -> None:
assert cfg.pretrained_checkpoint is not None, "cfg.pretrained_checkpoint must not be None!"
# if "image_aug" in cfg.pretrained_checkpoint:
# assert cfg.center_crop, "Expecting `center_crop==True` because model was trained with image augmentations!"
assert not (cfg.load_in_8bit and cfg.load_in_4bit), "Cannot use both 8-bit and 4-bit quantization!"
# Set random seed
set_seed_everywhere(cfg.seed)
# [OpenVLA] Set action un-normalization key
cfg.unnorm_key = cfg.task_suite_name
# # Load model
model = get_model(cfg)
# [OpenVLA] Check that the model contains the action un-normalization key
if cfg.model_family == "openvla":
# In some cases, the key must be manually modified (e.g. after training on a modified version of the dataset
# with the suffix "_no_noops" in the dataset name)
if cfg.unnorm_key not in model.norm_stats and f"{cfg.unnorm_key}_no_noops" in model.norm_stats:
cfg.unnorm_key = f"{cfg.unnorm_key}_no_noops"
assert cfg.unnorm_key in model.norm_stats, f"Action un-norm key {cfg.unnorm_key} not found in VLA `norm_stats`!"
# [OpenVLA] Get Hugging Face processor
processor = None
if cfg.model_family == "openvla":
processor = get_processor(cfg)
# Initialize local logging
run_id = f"EVAL-{cfg.task_suite_name}-{cfg.model_family}-{DATE_TIME}"
if cfg.run_id_note is not None:
run_id += f"--{cfg.run_id_note}"
os.makedirs(cfg.local_log_dir, exist_ok=True)
local_log_filepath = os.path.join(cfg.local_log_dir, run_id + ".txt")
log_file = open(local_log_filepath, "w")
print(f"Logging to local log file: {local_log_filepath}")
# Initialize LIBERO task suite
benchmark_dict = benchmark.get_benchmark_dict()
task_suite = benchmark_dict[cfg.task_suite_name]()
num_tasks_in_suite = task_suite.n_tasks
print(f"Task suite: {cfg.task_suite_name}")
log_file.write(f"Task suite: {cfg.task_suite_name}\n")
# Get expected image dimensions
resize_size = get_image_resize_size(cfg)
# Start evaluation
total_episodes, total_successes = 0, 0
extra_waits_dict = {}
for task_id in tqdm.tqdm(range(num_tasks_in_suite)):
# Get task
task = task_suite.get_task(task_id)
# Get default LIBERO initial states
initial_states = task_suite.get_task_init_states(task_id)
# Initialize LIBERO environment and task description
env, task_description = get_libero_env(task, cfg.model_family, resolution=256)
# Start episodes
task_episodes, task_successes = 0, 0
extra_waits = []
for episode_idx in tqdm.tqdm(range(cfg.num_trials_per_task)):
print(f"\nTask: {task_description}")
log_file.write(f"\nTask: {task_description}\n")
# Reset environment
env.reset()
# Set initial states
obs = env.set_init_state(initial_states[episode_idx])
# Setup
t = 0
replay_images = []
            if cfg.task_suite_name == "libero_spatial":
                max_steps = 220  # longest training demo has 193 steps
            elif cfg.task_suite_name == "libero_object":
                max_steps = 280  # longest training demo has 254 steps
            elif cfg.task_suite_name == "libero_goal":
                max_steps = 300  # longest training demo has 270 steps
            elif cfg.task_suite_name == "libero_10":
                max_steps = 520  # longest training demo has 505 steps
            elif cfg.task_suite_name == "libero_90":
                max_steps = 400  # longest training demo has 373 steps
            else:
                max_steps = 300  # fallback for other suites (e.g. real_data); adjust as needed
            while t < cfg.num_steps_wait:
                # IMPORTANT: Do nothing for the first few timesteps because the simulator drops objects
                # and we need to wait for them to fall
                obs, reward, done, info = env.step(get_libero_dummy_action(cfg.model_family))
                t += 1
# Process observations and task
task_texts = [task_description]
texts = ['robot ' + task_description]
img = get_libero_image(obs, resize_size)
            img = img[:, ::-1, :]  # mirror the image horizontally
batch_data = process_inputs(processor=processor, pixel_values=img, device='cuda')
            # Replicate frames across the temporal dimension for initialization purposes
            horizon = 1
            batch_data['pixel_values'] = batch_data['pixel_values'].unsqueeze(1).repeat(1, horizon, 1, 1, 1)
# Get the object-centric slot embeddings
with torch.autocast("cuda", dtype=torch.bfloat16):
slot_outputs = get_current_slots(model, batch_data, texts, in_past_tokens=None, device_id='cuda')
# Query model to get several prelim actions
with torch.autocast("cuda", dtype=torch.bfloat16):
actions = get_normalized_actions(model, batch_data, slot_outputs, device_id='cuda')
actions = actions.detach().float().cpu().numpy()
unnormalized_actions = denormalize_actions(model, cfg.unnorm_key, actions)
            # Main control loop: query the model and step the environment until max_steps is reached
while t < max_steps + cfg.num_steps_wait:
# Get preprocessed image
img = get_libero_image(obs, resize_size)
                img = img[:, ::-1, :]  # mirror the image horizontally
batch_data = process_inputs(processor=processor, pixel_values=img, device='cuda')
# pixel_values = batch_data['pixel_values'] # torch.Size([1, 6, 224, 224])
# input_ids = batch_data['input_ids'] # torch.Size([1, 50])
# attention_mask = batch_data['attention_mask'] # torch.Size([1, 50])
# Save preprocessed image for replay video
replay_images.append(img)
                # Add a singleton temporal dimension so pixel_values has shape [B, T, C, H, W]
                batch_data['pixel_values'] = batch_data['pixel_values'].unsqueeze(1)
# Get the object-centric slot embeddings
with torch.autocast("cuda", dtype=torch.bfloat16):
slot_outputs = get_current_slots(model, batch_data, texts, in_past_tokens=None, device_id='cuda')
# Query model to get several prelim actions
with torch.autocast("cuda", dtype=torch.bfloat16):
actions = get_normalized_actions(model, batch_data, slot_outputs, device_id='cuda')
actions = actions.detach().float().cpu().numpy()
unnormalized_actions = denormalize_actions(model, cfg.unnorm_key, actions)
action = unnormalized_actions[0]
# Normalize gripper action [0,1] -> [-1,+1] because the environment expects the latter
action = normalize_gripper_action(action, binarize=True)
# [OpenVLA] The dataloader flips the sign of the gripper action to align with other datasets
# (0 = close, 1 = open), so flip it back (-1 = open, +1 = close) before executing the action
# if cfg.model_family == "openvla":
# action = invert_gripper_action(action)
# Execute action in environment
obs, reward, done, info = env.step(action.tolist())
t += 1
if done:
task_successes += 1
total_successes += 1
break
task_episodes += 1
total_episodes += 1
# Save a replay video of the episode
cv2.destroyAllWindows()
save_rollout_video(
replay_images, total_episodes, success=done, task_description=task_description, log_file=log_file, saved_dir=cfg.saved_dir
)
            # Log current results
print(f"Success: {done}")
print(f"# episodes completed so far: {total_episodes}")
print(f"# successes: {total_successes} ({total_successes / total_episodes * 100:.1f}%)")
log_file.write(f"Success: {done}\n")
log_file.write(f"# episodes completed so far: {total_episodes}\n")
log_file.write(f"# successes: {total_successes} ({total_successes / total_episodes * 100:.1f}%)\n")
log_file.flush()
# Log final results
print(f"Current task success rate: {float(task_successes) / float(task_episodes)}")
print(f"Current total success rate: {float(total_successes) / float(total_episodes)}")
log_file.write(f"Current task success rate: {float(task_successes) / float(task_episodes)}\n")
log_file.write(f"Current total success rate: {float(total_successes) / float(total_episodes)}\n")
log_file.flush()
# Save local log file
log_file.close()
if __name__ == "__main__":
eval_libero()