import os
import warnings
import shutil

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig

from objectrelator.model import *
from objectrelator.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from objectrelator.train.train_datasets import get_mask_config
from objectrelator.model.language_model.llava_phi import PSALM, PSALMForDAVISEval, ObjectRelator


def load_pretrained_model(model_path, model_base, model_name, model_args,
                          mask_config='./objectrelator/mask_config/maskformer2_swin_base_384_bs16_50ep.yaml',
                          load_8bit=False, load_4bit=False, device_map="auto",
                          device="cuda"):
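    """Load a PSALM/ObjectRelator checkpoint with its tokenizer and image processor.

    Returns a ``(tokenizer, model, image_processor, context_len)`` tuple.
    ``model_base`` and ``device_map`` are accepted for API compatibility but
    are not used by the body below, which always loads weights on CPU.
    """
    # Load weights on CPU; callers move the model to the target device
    # afterwards (only the vision tower is moved to `device` in this function).
    # NOTE: combining 8-bit/4-bit loading with a CPU device_map may fail at
    # runtime, since bitsandbytes quantization expects GPU placement.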
kwargs = {"device_map": 'cpu'}
|
|
|
|
|
|
if load_8bit:
|
|
|
kwargs['load_in_8bit'] = True
|
|
|
elif load_4bit:
|
|
|
kwargs['load_in_4bit'] = True
|
|
|
kwargs['quantization_config'] = BitsAndBytesConfig(
|
|
|
load_in_4bit=True,
|
|
|
bnb_4bit_compute_dtype=torch.float16,
|
|
|
bnb_4bit_use_double_quant=True,
|
|
|
bnb_4bit_quant_type='nf4'
|
|
|
)
|
|
|
else:
|
|
|
kwargs['torch_dtype'] = torch.float16
|
|
|
|
|
|
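
    # Registry mapping supported model names to their classes, and the
    # Mask2Former-style config that drives the segmentation decoder.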
    model_map = {
        'psalm': PSALM,
        'psalm_video': PSALMForDAVISEval,
        'ObjectRelator': ObjectRelator
    }
    mask_cfg = get_mask_config(mask_config)
    mask_cfg.MODEL.MASK_FORMER.SEG_TASK = model_args.seg_task if hasattr(model_args, 'seg_task') else 'instance'
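
    # Load the tokenizer, validate the requested model name, then instantiate
    # the model with the mask decoder config and quantization/dtype kwargs.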
    tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
    if model_name not in model_map:
        raise ValueError(f"Model {model_name} is not supported. Supported models are: {list(model_map.keys())}")

    print(f'current model is {model_name}')
    model = model_map[model_name].from_pretrained(model_path, mask_decoder_cfg=mask_cfg, **kwargs)
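
    # Only the vision tower is moved to `device` here; its image processor is
    # returned so callers can preprocess inputs consistently.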
    vision_tower = model.get_vision_tower()
    vision_tower.to(device=device)
    image_processor = vision_tower.image_processor
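
    # Use the checkpoint's max_sequence_length when available; otherwise fall
    # back to a 2048-token context window.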
    if hasattr(model.config, "max_sequence_length"):
        context_len = model.config.max_sequence_length
    else:
        context_len = 2048

    return tokenizer, model, image_processor, context_len
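
# A minimal usage sketch (the checkpoint path and `model_args` fields below
# are illustrative placeholders, not values shipped with this repo):
#
#     from types import SimpleNamespace
#
#     tokenizer, model, image_processor, context_len = load_pretrained_model(
#         model_path='checkpoints/objectrelator',  # hypothetical local path
#         model_base=None,
#         model_name='ObjectRelator',
#         model_args=SimpleNamespace(seg_task='instance'),
#     )
#     model.to('cuda')  # weights are loaded on CPU; move the model explicitly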