---
library_name: transformers
pipeline_tag: image-text-to-text
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
base_model:
- ServiceNow-AI/Apriel-1.5-15b-Thinker
---

This tiny model is intended for debugging. It is randomly initialized with a configuration adapted from [ServiceNow-AI/Apriel-1.5-15b-Thinker](https://huggingface.co/ServiceNow-AI/Apriel-1.5-15b-Thinker).
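
The shrunken configuration keeps only two decoder layers and two vision encoder layers with very small hidden sizes, so the checkpoint loads in seconds. As a quick sanity check, here is a minimal sketch (reusing the `tiny-random/apriel-1.5` repo id from the usage example below) that prints the total parameter count:

```python
import torch
from transformers import AutoModelForImageTextToText

# Load the tiny debug checkpoint and report how many parameters it has.
model = AutoModelForImageTextToText.from_pretrained("tiny-random/apriel-1.5", dtype=torch.bfloat16)
print(f"parameters: {sum(p.numel() for p in model.parameters()):,}")
```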

### Example usage:

```python
import re
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForImageTextToText

# Load model
model_id = "tiny-random/apriel-1.5"
model = AutoModelForImageTextToText.from_pretrained(
    model_id,
    dtype=torch.bfloat16,
    device_map="auto"
)
processor = AutoProcessor.from_pretrained(model_id)

# Download a sample image
url = "https://picsum.photos/id/237/200/300"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

# Build a single-turn chat with one image placeholder
chat = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Which animal is this?"},
            {"type": "image"},
        ],
    }
]
prompt = processor.apply_chat_template(chat, add_generation_prompt=True, tokenize=False)
inputs = processor(text=prompt, images=[image], return_tensors="pt").to(model.device)
inputs.pop("token_type_ids", None)
inputs['pixel_values'] = inputs['pixel_values'].to(model.dtype)

with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.6)

# Decode only the newly generated tokens
generated_ids = output_ids[:, inputs['input_ids'].shape[1]:]
output = processor.decode(generated_ids[0], skip_special_tokens=False)
print("Image Response:", output)
```
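
For a faster smoke test that skips the vision path, the image turn can be dropped. This is a minimal sketch that reuses the `model` and `processor` loaded above and assumes the chat template accepts a text-only turn:

```python
# Text-only round trip, reusing the model and processor loaded above.
chat = [{"role": "user", "content": [{"type": "text", "text": "Say hello."}]}]
prompt = processor.apply_chat_template(chat, add_generation_prompt=True, tokenize=False)
inputs = processor(text=prompt, return_tensors="pt").to(model.device)
inputs.pop("token_type_ids", None)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=32, do_sample=True, temperature=0.6)
print("Text Response:", processor.decode(output_ids[0, inputs["input_ids"].shape[1]:], skip_special_tokens=False))
```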

### Code to create this repo:

```python
import json
from pathlib import Path

import accelerate
import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
    AutoModelForImageTextToText,
    set_seed,
)

source_model_id = "ServiceNow-AI/Apriel-1.5-15b-Thinker"
save_folder = "/tmp/tiny-random/apriel-1.5"

# Reuse the source processor (tokenizer + image processor) unchanged
processor = AutoProcessor.from_pretrained(source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

# Shrink the source config: two tiny layers for both the text and vision towers
with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)

config_json['text_config'].update({
    'head_dim': 32,
    'hidden_size': 8,
    'intermediate_size': 64,
    'num_hidden_layers': 2,
    'num_attention_heads': 8,
    'num_key_value_heads': 4,
})
config_json['vision_config'].update(
    {
        'head_dim': 32,
        'intermediate_size': 256,
        'hidden_size': 32 * 4,
        'num_attention_heads': 4,
        'num_hidden_layers': 2,
    }
)
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)

# Instantiate the tiny model in bfloat16 and copy the source generation config
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForImageTextToText.from_config(config, trust_remote_code=True).to(torch.bfloat16)
torch.set_default_dtype(torch.float32)
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
model.generation_config.do_sample = True
print(model.generation_config)

# Randomly re-initialize every parameter
model = model.cpu()
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape)
model.save_pretrained(save_folder)
```
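
After the repo is written out, a quick reload is a useful smoke test. The sketch below assumes the `save_folder` path and imports from the script above and only checks that generation runs end to end:

```python
# Reload the freshly saved tiny checkpoint and make sure generation runs.
model = AutoModelForImageTextToText.from_pretrained(save_folder, dtype=torch.bfloat16)
processor = AutoProcessor.from_pretrained(save_folder)
inputs = processor(text="Hello", return_tensors="pt")
inputs.pop("token_type_ids", None)
print(model.generate(**inputs, max_new_tokens=8, do_sample=True))
```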

### Printing the model:

```text
LlavaForConditionalGeneration(
  (model): LlavaModel(
    (vision_tower): PixtralVisionModel(
      (patch_conv): Conv2d(3, 128, kernel_size=(16, 16), stride=(16, 16), bias=False)
      (ln_pre): PixtralRMSNorm((128,), eps=1e-05)
      (transformer): PixtralTransformer(
        (layers): ModuleList(
          (0-1): 2 x PixtralAttentionLayer(
            (attention_norm): PixtralRMSNorm((128,), eps=1e-05)
            (feed_forward): PixtralMLP(
              (gate_proj): Linear(in_features=128, out_features=256, bias=False)
              (up_proj): Linear(in_features=128, out_features=256, bias=False)
              (down_proj): Linear(in_features=256, out_features=128, bias=False)
              (act_fn): SiLU()
            )
            (attention): PixtralAttention(
              (k_proj): Linear(in_features=128, out_features=128, bias=False)
              (v_proj): Linear(in_features=128, out_features=128, bias=False)
              (q_proj): Linear(in_features=128, out_features=128, bias=False)
              (o_proj): Linear(in_features=128, out_features=128, bias=False)
            )
            (ffn_norm): PixtralRMSNorm((128,), eps=1e-05)
          )
        )
      )
      (patch_positional_embedding): PixtralRotaryEmbedding()
    )
    (multi_modal_projector): LlavaMultiModalProjector(
      (linear_1): Linear(in_features=128, out_features=8, bias=True)
      (act): GELUActivation()
      (linear_2): Linear(in_features=8, out_features=8, bias=True)
    )
    (language_model): MistralModel(
      (embed_tokens): Embedding(131072, 8)
      (layers): ModuleList(
        (0-1): 2 x MistralDecoderLayer(
          (self_attn): MistralAttention(
            (q_proj): Linear(in_features=8, out_features=256, bias=False)
            (k_proj): Linear(in_features=8, out_features=128, bias=False)
            (v_proj): Linear(in_features=8, out_features=128, bias=False)
            (o_proj): Linear(in_features=256, out_features=8, bias=False)
          )
          (mlp): MistralMLP(
            (gate_proj): Linear(in_features=8, out_features=64, bias=False)
            (up_proj): Linear(in_features=8, out_features=64, bias=False)
            (down_proj): Linear(in_features=64, out_features=8, bias=False)
            (act_fn): SiLU()
          )
          (input_layernorm): MistralRMSNorm((8,), eps=1e-05)
          (post_attention_layernorm): MistralRMSNorm((8,), eps=1e-05)
        )
      )
      (norm): MistralRMSNorm((8,), eps=1e-05)
      (rotary_emb): MistralRotaryEmbedding()
    )
  )
  (lm_head): Linear(in_features=8, out_features=131072, bias=False)
)
```