'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
'''
import warnings
warnings.filterwarnings("ignore")

from .blip_vit import VisionTransformer, interpolate_pos_embed
from .blip_med import BertConfig, BertModel, BertLMHeadModel
from transformers import BertTokenizer

import torch
from torch import nn
import torch.nn.functional as F

import os
from urllib.parse import urlparse
from timm.models.hub import download_cached_file
import numpy as np

from pathlib import Path

LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
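# This module bundles the BLIP model variants used in this repo: BLIP_Base (feature extraction),
# BLIP_Decoder (image captioning), and BLIP_VQA (visual question answering), together with
# helpers for building the ViT backbone, the tokenizer, and checkpoint loading.
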
class BLIP_Base(nn.Module):
    def __init__(self,
                 med_config = Path(LOCAL_PATH, 'blip_configs/med_config.json'),
                 image_size = 224,
                 vit = 'base',
                 vit_grad_ckpt = False,
                 vit_ckpt_layer = 0,
                 ):
        """
        Args:
            med_config (str or Path): path to the mixture-of-encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of the vision transformer ('base' or 'large')
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)

    def forward(self, image, caption, mode):

        assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal"
        text = self.tokenizer(caption, return_tensors="pt").to(image.device)

        if mode == 'image':
            # return patch-level image features from the vision transformer
            image_embeds = self.visual_encoder(image)
            return image_embeds

        elif mode == 'text':
            # return text features from the text encoder (self-attention only)
            text_output = self.text_encoder(text.input_ids, attention_mask=text.attention_mask,
                                            return_dict=True, mode='text')
            return text_output.last_hidden_state

        elif mode == 'multimodal':
            # return multimodal features: text cross-attends to the image features
            image_embeds = self.visual_encoder(image)
            image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)

            text.input_ids[:, 0] = self.tokenizer.enc_token_id
            output = self.text_encoder(text.input_ids,
                                       attention_mask=text.attention_mask,
                                       encoder_hidden_states=image_embeds,
                                       encoder_attention_mask=image_atts,
                                       return_dict=True,
                                       )
            return output.last_hidden_state


class BLIP_Decoder(nn.Module):
    def __init__(self,
                 med_config = Path(LOCAL_PATH, 'blip_configs/med_config.json'),
                 image_size = 384,
                 vit = 'base',
                 vit_grad_ckpt = False,
                 vit_ckpt_layer = 0,
                 prompt = 'a picture of ',
                 ):
        """
        Args:
            med_config (str or Path): path to the mixture-of-encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of the vision transformer ('base' or 'large')
            prompt (str): prompt prepended to every caption during training and generation
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_decoder = BertLMHeadModel(config=med_config)

        self.prompt = prompt
        # number of prompt tokens (excluding the trailing [SEP]); these positions are masked out of the LM loss
        self.prompt_length = len(self.tokenizer(self.prompt).input_ids) - 1

    def forward(self, image, caption):

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)

        text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device)

        text.input_ids[:, 0] = self.tokenizer.bos_token_id

        # ignore padding and prompt tokens when computing the language-modeling loss
        decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100)
        decoder_targets[:, :self.prompt_length] = -100

        decoder_output = self.text_decoder(text.input_ids,
                                           attention_mask=text.attention_mask,
                                           encoder_hidden_states=image_embeds,
                                           encoder_attention_mask=image_atts,
                                           labels=decoder_targets,
                                           return_dict=True,
                                           )
        loss_lm = decoder_output.loss

        return loss_lm

    def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0):
        image_embeds = self.visual_encoder(image)

        if not sample:
            # beam search: replicate image features once per beam
            image_embeds = image_embeds.repeat_interleave(num_beams, dim=0)

        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
        model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask": image_atts}

        prompt = [self.prompt] * image.size(0)
        input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device)
        input_ids[:, 0] = self.tokenizer.bos_token_id
        input_ids = input_ids[:, :-1]  # drop the trailing [SEP] token

        if sample:
            # nucleus sampling
            outputs = self.text_decoder.generate(input_ids=input_ids,
                                                 max_length=max_length,
                                                 min_length=min_length,
                                                 do_sample=True,
                                                 top_p=top_p,
                                                 num_return_sequences=1,
                                                 eos_token_id=self.tokenizer.sep_token_id,
                                                 pad_token_id=self.tokenizer.pad_token_id,
                                                 repetition_penalty=1.1,
                                                 **model_kwargs)
        else:
            # beam search
            outputs = self.text_decoder.generate(input_ids=input_ids,
                                                 max_length=max_length,
                                                 min_length=min_length,
                                                 num_beams=num_beams,
                                                 eos_token_id=self.tokenizer.sep_token_id,
                                                 pad_token_id=self.tokenizer.pad_token_id,
                                                 repetition_penalty=repetition_penalty,
                                                 **model_kwargs)

        captions = []
        for output in outputs:
            caption = self.tokenizer.decode(output, skip_special_tokens=True)
            captions.append(caption[len(self.prompt):])
        return captions


def blip_decoder(pretrained='', **kwargs):
    model = BLIP_Decoder(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        assert len(msg.missing_keys) == 0
    return model
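
# Example (sketch, not part of the original file): caption generation with a pretrained
# captioning checkpoint. The checkpoint location and the preprocessed `images` batch
# (a float tensor of shape [B, 3, 384, 384]) are assumptions.
#
#   model = blip_decoder(pretrained='<path-or-url-to-captioning-checkpoint>', image_size=384, vit='base')
#   model.eval()
#   with torch.no_grad():
#       captions = model.generate(images, sample=False, num_beams=3, max_length=30, min_length=10)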

def blip_feature_extractor(pretrained='', **kwargs):
    model = BLIP_Base(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        assert len(msg.missing_keys) == 0
    return model
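
# Example (sketch): unimodal and multimodal feature extraction with BLIP_Base.
# `images` (a [B, 3, 224, 224] tensor) and `captions` (a list of strings) are assumptions.
#
#   model = blip_feature_extractor(pretrained='<path-or-url-to-checkpoint>', image_size=224, vit='base')
#   image_feats = model(images, captions, mode='image')            # ViT patch embeddings
#   text_feats = model(images, captions, mode='text')              # text-only BERT features
#   multimodal_feats = model(images, captions, mode='multimodal')  # text cross-attending to the image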

def init_tokenizer():
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    tokenizer.add_special_tokens({'bos_token': '[DEC]'})
    tokenizer.add_special_tokens({'additional_special_tokens': ['[ENC]']})
    tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
    return tokenizer
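
# [DEC] serves as the beginning-of-sequence token for the text decoder, while [ENC] replaces the
# first token of text fed to the image-grounded text encoder; enc_token_id is stored on the
# tokenizer so callers can swap it in (see BLIP_Base.forward and BLIP_VQA.forward).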

def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0):

    assert vit in ['base', 'large'], "vit parameter must be base or large"
    if vit == 'base':
        vision_width = 768
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12,
                                           num_heads=12, use_grad_checkpointing=use_grad_checkpointing,
                                           ckpt_layer=ckpt_layer,
                                           drop_path_rate=0 or drop_path_rate
                                           )
    elif vit == 'large':
        vision_width = 1024
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24,
                                           num_heads=16, use_grad_checkpointing=use_grad_checkpointing,
                                           ckpt_layer=ckpt_layer,
                                           drop_path_rate=0.1 or drop_path_rate
                                           )
    return visual_encoder, vision_width
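
# Note on the `or` expressions above: `0 or drop_path_rate` passes the caller's value through
# for the base model, but `0.1 or drop_path_rate` always evaluates to 0.1, so the
# drop_path_rate argument is effectively ignored for the large model.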

def is_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")

def load_checkpoint(model, url_or_filename):
    if is_url(url_or_filename):
        cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
        checkpoint = torch.load(cached_file, map_location='cpu')
    elif os.path.isfile(url_or_filename):
        checkpoint = torch.load(url_or_filename, map_location='cpu')
    else:
        raise RuntimeError('checkpoint url or path is invalid')

    state_dict = checkpoint['model']

    # resize positional embeddings to the current image resolution
    state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)
    if 'visual_encoder_m.pos_embed' in model.state_dict().keys():
        state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],
                                                                         model.visual_encoder_m)
    # drop checkpoint weights whose shapes do not match the current model
    for key in model.state_dict().keys():
        if key in state_dict.keys():
            if state_dict[key].shape != model.state_dict()[key].shape:
                del state_dict[key]

    msg = model.load_state_dict(state_dict, strict=False)
    print('load checkpoint from %s' % url_or_filename)
    return model, msg


class BLIP_VQA(nn.Module):
    def __init__(self,
                 med_config = Path(LOCAL_PATH, 'blip_configs/med_config.json'),
                 image_size = 480,
                 vit = 'base',
                 vit_grad_ckpt = False,
                 vit_ckpt_layer = 0,
                 ):
        """
        Args:
            med_config (str or Path): path to the mixture-of-encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of the vision transformer ('base' or 'large')
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer, drop_path_rate=0.1)
        self.tokenizer = init_tokenizer()

        encoder_config = BertConfig.from_json_file(med_config)
        encoder_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=encoder_config, add_pooling_layer=False)

        decoder_config = BertConfig.from_json_file(med_config)
        self.text_decoder = BertLMHeadModel(config=decoder_config)

    def forward(self, image, question, answer=None, n=None, weights=None, train=True, inference='rank', k_test=128):

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)

        question = self.tokenizer(question, padding='longest', truncation=True, max_length=35,
                                  return_tensors="pt").to(image.device)
        question.input_ids[:, 0] = self.tokenizer.enc_token_id

        if train:
            '''
            n: number of answers for each question
            weights: weight for each answer
            '''
            answer = self.tokenizer(answer, padding='longest', return_tensors="pt").to(image.device)
            answer.input_ids[:, 0] = self.tokenizer.bos_token_id
            answer_targets = answer.input_ids.masked_fill(answer.input_ids == self.tokenizer.pad_token_id, -100)

            question_output = self.text_encoder(question.input_ids,
                                                attention_mask=question.attention_mask,
                                                encoder_hidden_states=image_embeds,
                                                encoder_attention_mask=image_atts,
                                                return_dict=True)

            # replicate each question's encoder states once per ground-truth answer
            question_states = []
            question_atts = []
            for b, num in enumerate(n):
                question_states += [question_output.last_hidden_state[b]] * num
                question_atts += [question.attention_mask[b]] * num
            question_states = torch.stack(question_states, 0)
            question_atts = torch.stack(question_atts, 0)

            answer_output = self.text_decoder(answer.input_ids,
                                              attention_mask=answer.attention_mask,
                                              encoder_hidden_states=question_states,
                                              encoder_attention_mask=question_atts,
                                              labels=answer_targets,
                                              return_dict=True,
                                              reduction='none',
                                              )

            # weighted language-modeling loss, averaged over the batch
            loss = weights * answer_output.loss
            loss = loss.sum() / image.size(0)

            return loss

        else:
            question_output = self.text_encoder(question.input_ids,
                                                attention_mask=question.attention_mask,
                                                encoder_hidden_states=image_embeds,
                                                encoder_attention_mask=image_atts,
                                                return_dict=True)

            if inference == 'generate':
                num_beams = 3
                question_states = question_output.last_hidden_state.repeat_interleave(num_beams, dim=0)
                question_atts = torch.ones(question_states.size()[:-1], dtype=torch.long).to(question_states.device)
                model_kwargs = {"encoder_hidden_states": question_states, "encoder_attention_mask": question_atts}

                bos_ids = torch.full((image.size(0), 1), fill_value=self.tokenizer.bos_token_id, device=image.device)

                outputs = self.text_decoder.generate(input_ids=bos_ids,
                                                     max_length=10,
                                                     min_length=1,
                                                     num_beams=num_beams,
                                                     eos_token_id=self.tokenizer.sep_token_id,
                                                     pad_token_id=self.tokenizer.pad_token_id,
                                                     **model_kwargs)

                answers = []
                for output in outputs:
                    answer = self.tokenizer.decode(output, skip_special_tokens=True)
                    answers.append(answer)
                return answers

            elif inference == 'rank':
                # rank a list of pre-tokenized candidate answers by decoder likelihood
                max_ids = self.rank_answer(question_output.last_hidden_state, question.attention_mask,
                                           answer.input_ids, answer.attention_mask, k_test)
                return max_ids

    def rank_answer(self, question_states, question_atts, answer_ids, answer_atts, k):

        num_ques = question_states.size(0)
        start_ids = answer_ids[0, 0].repeat(num_ques, 1)  # bos token

        start_output = self.text_decoder(start_ids,
                                         encoder_hidden_states=question_states,
                                         encoder_attention_mask=question_atts,
                                         return_dict=True,
                                         reduction='none')
        logits = start_output.logits[:, 0, :]

        # first stage: keep the k candidate answers whose first token is most probable
        # topk_probs, topk_ids: [num_question, k]
        answer_first_token = answer_ids[:, 1]
        prob_first_token = F.softmax(logits, dim=1).index_select(dim=1, index=answer_first_token)
        topk_probs, topk_ids = prob_first_token.topk(k, dim=1)

        # gather the full token sequences of the selected answers: [num_question*k, answer_len]
        input_ids = []
        input_atts = []
        for b, topk_id in enumerate(topk_ids):
            input_ids.append(answer_ids.index_select(dim=0, index=topk_id))
            input_atts.append(answer_atts.index_select(dim=0, index=topk_id))
        input_ids = torch.cat(input_ids, dim=0)
        input_atts = torch.cat(input_atts, dim=0)

        targets_ids = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100)

        # repeat the encoder output once per candidate answer
        question_states = tile(question_states, 0, k)
        question_atts = tile(question_atts, 0, k)

        # second stage: score each candidate by its full-sequence log-likelihood
        output = self.text_decoder(input_ids,
                                   attention_mask=input_atts,
                                   encoder_hidden_states=question_states,
                                   encoder_attention_mask=question_atts,
                                   labels=targets_ids,
                                   return_dict=True,
                                   reduction='none')

        log_probs_sum = -output.loss
        log_probs_sum = log_probs_sum.view(num_ques, k)

        max_topk_ids = log_probs_sum.argmax(dim=1)
        max_ids = topk_ids[max_topk_ids >= 0, max_topk_ids]

        return max_ids


def blip_vqa(pretrained='', **kwargs):
    model = BLIP_VQA(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)

    return model
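
# Example (sketch): open-ended answer generation with a pretrained VQA checkpoint.
# The checkpoint location, `images` (a [B, 3, 480, 480] tensor), and `questions` (a list of
# strings) are assumptions; inference='rank' additionally requires pre-tokenized candidate answers.
#
#   model = blip_vqa(pretrained='<path-or-url-to-vqa-checkpoint>', image_size=480, vit='base')
#   model.eval()
#   with torch.no_grad():
#       answers = model(images, questions, train=False, inference='generate')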


def tile(x, dim, n_tile):
    init_dim = x.size(dim)
    repeat_idx = [1] * x.dim()
    repeat_idx[dim] = n_tile
    x = x.repeat(*(repeat_idx))
    order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]))
    return torch.index_select(x, dim, order_index.to(x.device))
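
# tile() repeats entries along `dim` in blocked order, e.g. tile([a, b], 0, 2) -> [a, a, b, b]
# (whereas Tensor.repeat alone would give [a, b, a, b]); rank_answer uses it to pair each
# question's encoder states with its k candidate answers.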