"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear module with a small trainable low-rank adapter (testing only)."""

        def __init__(self, module, rank):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
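    # Usage sketch (illustrative, not part of the original tests): wrap a frozen
    # linear layer so only the low-rank adapter trains. Names are placeholders.
    #
    #     base = nn.Linear(16, 16)
    #     base.weight.requires_grad_(False)
    #     lora = LoRALayer(base, rank=4)
    #     y = lora(torch.randn(2, 16))  # frozen base output + adapter output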


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")

    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        """Free GPU memory between tests."""
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        """A 4-bit model's config must carry a serializable quantization_config."""
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        """The 4-bit model should be smaller than fp16 by the expected ratio."""
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        """Every quantizable linear layer should hold packed uint8 weights."""
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        """Passing an explicit BitsAndBytesConfig should behave like load_in_4bit=True."""
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        """Serializing a 4-bit model is not supported and must raise."""
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        """Passing both a quantization_config and load_in_4bit must raise."""
        bnb_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        """Moving or casting a quantized model must raise; the fp16 model stays movable."""
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries casting to fp32
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries casting to fp16
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        """Modules listed in _keep_in_fp32_modules must stay in fp32 after 4-bit loading."""
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        """Free GPU memory between tests."""
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        """Free GPU memory between tests."""
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be kept as nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        """Free GPU memory between tests."""
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        """Load the model balanced across 2 GPUs and check that inference still works."""
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
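# Quick-start sketch (illustrative; mirrors what the tests above exercise, with
# an arbitrary prompt and token budget):
#
#     tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
#     model = AutoModelForCausalLM.from_pretrained(
#         "bigscience/bloom-1b7",
#         quantization_config=BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4"),
#         device_map="auto",
#     )
#     ids = tokenizer("Hello my name is", return_tensors="pt")["input_ids"].to(0)
#     print(tokenizer.decode(model.generate(input_ids=ids, max_new_tokens=10)[0]))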
"""simple docstring"""
import unittest
from transformers import DonutProcessor
a : Optional[int] = '''naver-clova-ix/donut-base'''
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Dict:
a : Optional[Any] = DonutProcessor.from_pretrained(lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a : Optional[Any] = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
a : Tuple = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
a : List[Any] = self.processor.tokenajson(lowerCAmelCase__ )
self.assertDictEqual(lowerCAmelCase__ , lowerCAmelCase__ )
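# Note (comments only, inferred from the expected_json above): token2json turns
# each <s_key>...</s_key> tag pair into a dict key and treats <sep/> as a list
# separator for repeated groups such as the nicknames.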
# ---------------------------------------------------------------------------
# Sample 3: transformers — ViT model configuration
# ---------------------------------------------------------------------------
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
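# Usage sketch (illustrative): the defaults above correspond to ViT-Base/16 at
# 224x224; overriding a few fields gives other variants.
#
#     config = ViTConfig()  # ViT-Base defaults
#     large = ViTConfig(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096)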
# ---------------------------------------------------------------------------
# Sample 4: diffusers — Kandinsky 2.2 ControlNet pipeline
# ---------------------------------------------------------------------------
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
__UpperCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    """Round height/width up to the nearest multiple of scale_factor**2, then divide once by scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
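# Worked example (comment only): with the default scale_factor of 8, a requested
# 768x768 image maps to a 96x96 latent grid: 768 // 8**2 = 12 with no remainder,
# so new_height = 12 * 8 = 96, and likewise for the width.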


class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
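    # Note (comment only): scaling by scheduler.init_noise_sigma matches the
    # scheduler's expected initial noise level; for DDPM it is 1.0, so it is a
    # no-op here but keeps the code correct for schedulers where it is not.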

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offload all models to CPU via accelerate, moving each submodule to GPU only when needed.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offload all models to CPU via accelerate hooks, keeping one whole model on GPU at a time.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed, accounting for accelerate hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
# ---------------------------------------------------------------------------
# Sample 5: data structures — treap (randomized balanced BST)
# ---------------------------------------------------------------------------
from __future__ import annotations

from random import random


class Node:
    """Treap node: ordered by `value` (BST), heap-ordered by the random `prior`."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (nodes with value < `value`, the rest)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in `left` precedes every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
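# Illustration (comments only): split/merge preserve BST order, so inserting
# 1, 2, 3 and erasing 2 prints "1,3," regardless of the random priorities:
#
#     root = None
#     for v in (1, 2, 3):
#         root = insert(root, v)
#     inorder(erase(root, 2))  # prints: 1,3,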


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a sequence of "+value" / "-value" commands to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive loop: read commands, mutate the treap, print it after each line."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
# ---------------------------------------------------------------------------
# Sample 6: transformers — Flax RoBERTa-PreLayerNorm model tests
# ---------------------------------------------------------------------------
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
# ---------------------------------------------------------------------------
# Sample 7: transformers — XLM tokenizer tests
# ---------------------------------------------------------------------------
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        # Each merges line is "first second count"
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
# ---------------------------------------------------------------------------
# Sample 8: transformers — CLIP image processor
# ---------------------------------------------------------------------------
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
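    # Note (comment only): these defaults reproduce CLIP's published
    # preprocessing — resize the shortest edge to 224, center-crop 224x224,
    # rescale to [0, 1], then normalize with the OpenAI CLIP mean/std constants.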

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches size["shortest_edge"], keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
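# Usage sketch (illustrative; the checkpoint name is an assumption):
#
#     processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor(images=pil_image, return_tensors="pt")
#     batch["pixel_values"].shape  # (1, 3, 224, 224)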
# ---------------------------------------------------------------------------
# Sample 9: datasets — the Image feature type
# ---------------------------------------------------------------------------
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class Image:
    """Image feature: reads image data from a path, bytes, numpy array, or PIL image."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode an example into a {"bytes", "path"} storage dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a {"bytes", "path"} storage dict into a PIL image."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten into {"bytes", "path"}."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image storage type: pa.struct({"bytes": binary, "path": string})."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files from their paths into the Arrow storage as bytes."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL image to bytes using its native compression if possible, otherwise PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
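# Usage sketch (illustrative; the file path is a placeholder):
#
#     from datasets import Dataset, Image
#     ds = Dataset.from_dict({"image": ["path/to/img.png"]}).cast_column("image", Image())
#     ds[0]["image"]  # lazily decoded to a PIL.Image.Image by decode_example()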
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files

FILE_CONTENT = """\
    Text data.
    Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
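
# Usage sketch (hypothetical test, not part of this conftest): pytest injects the
# session-scoped fixtures above by parameter name, e.g.
#
#     def test_text_file(text_file):
#         assert open(text_file).read() == FILE_CONTENT
#
# Each fixture is created once per test session and reused across tests.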
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

    path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
    data = bytes(FILE_CONTENT, "utf-8")
    with lz4.frame.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

    path = tmp_path_factory.mktemp("data") / "file.txt.7z"
    with py7zr.SevenZipFile(path, "w") as archive:
        archive.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

    path = tmp_path_factory.mktemp("data") / "file.txt.zst"
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename


DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
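
# Sanity-check sketch (hypothetical): the Parquet file written above can be read
# back with pyarrow and should contain one row per item in DATA:
#
#     table = pq.read_table(path)
#     assert table.num_rows == len(DATA)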
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import (
    BaseOutput,
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_k_diffusion_available,
    is_k_diffusion_version,
    is_onnx_available,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """Output class for Stable Diffusion pipelines: generated images plus per-image NSFW flags."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
    from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline

try:
    if not (
        is_torch_available()
        and is_transformers_available()
        and is_k_diffusion_available()
        and is_k_diffusion_version(">=", "0.0.12")
    ):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline

try:
    if not (is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """Flax counterpart of the pipeline output: generated images plus per-image NSFW flags."""

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
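
# Usage sketch (hypothetical): importing from this package resolves to either the
# real pipeline or a dummy object with a helpful error, depending on which optional
# backends (torch, transformers, onnx, k-diffusion, flax) are installed:
#
#     from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")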
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence, leaving the rest unchanged."""
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
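
# Expected behaviour (sketch of example calls, given the implementation above):
#
#     capitalize("hello world")  # -> "Hello world"
#     capitalize("123 hello")    # -> "123 hello" (a non-letter first char is left as-is)
#     capitalize("")             # -> ""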
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
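
# Input format note: load_entity_vocab() expects one tab-separated record per line,
# e.g. (hypothetical excerpt)
#
#     [PAD]\t0
#     [UNK]\t1
#     [MASK]\t2
#
# Hypothetical invocation of this script (the file name is illustrative):
#
#     python convert_luke_checkpoint.py --checkpoint_path luke.bin \
#         --metadata_path metadata.json --entity_vocab_path entity_vocab.tsv \
#         --pytorch_dump_folder_path out/ --model_size base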
import argparse
import io

import requests
import torch
from omegaconf import OmegaConf

from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    assign_to_checkpoint,
    conv_attn_to_linear,
    create_vae_diffusers_config,
    renew_vae_attention_paths,
    renew_vae_resnet_paths,
)


def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
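
# Hypothetical invocation (flag names taken from the argparse setup above; the
# script file name is illustrative):
#
#     python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers
#
# The converter downloads the v1 inference YAML, remaps the LDM VAE state dict to
# diffusers' AutoencoderKL layout, and writes the result with save_pretrained().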
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
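
# Usage sketch (hypothetical; assumes Pillow and the VQA checkpoint are available):
#
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(Image.open("photo.jpg"), "What color is the car?")
#
# PipelineTool makes instances callable; a call chains encode -> forward -> decode.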
"""simple docstring"""
from __future__ import annotations
lowerCamelCase_ = 8.9_88e9 # units = N * m^s * C^-2
def __lowerCamelCase ( a_ : float , a_ : float , a_ : float , a_ : float ) -> dict[str, float]:
__SCREAMING_SNAKE_CASE :int = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
__SCREAMING_SNAKE_CASE :int = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
__SCREAMING_SNAKE_CASE :Optional[Any] = abs(a_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
__SCREAMING_SNAKE_CASE :List[Any] = abs(a_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
__SCREAMING_SNAKE_CASE :Tuple = (COULOMBS_CONSTANT * charge_product / abs(a_ )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
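
# Worked example: two 1 C charges 1 m apart, solving for the force:
#
#     coulombs_law(force=0, charge1=1, charge2=1, distance=1)
#     # -> {"force": 8.988e9}   (k * 1 * 1 / 1**2)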
from __future__ import annotations

import unittest

from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TFMobileBertForMaskedLM,
        TFMobileBertForMultipleChoice,
        TFMobileBertForNextSentencePrediction,
        TFMobileBertForPreTraining,
        TFMobileBertForQuestionAnswering,
        TFMobileBertForSequenceClassification,
        TFMobileBertForTokenClassification,
        TFMobileBertModel,
    )


@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
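
# Design note: at runtime the module replaces itself with a _LazyModule, so e.g.
# `from transformers.models.yolos import YolosModel` only imports modeling_yolos
# (and therefore torch) on first attribute access; under TYPE_CHECKING the real
# imports run so static type checkers still see the concrete symbols.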
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester :
def __init__( self : Optional[Any] ,A : Any ,A : List[str]=2 ,A : Optional[Any]=8 ,A : List[Any]=True ,A : str=True ,A : Dict=True ,A : Dict=True ,A : Any=99 ,A : Dict=16 ,A : Optional[int]=5 ,A : Tuple=2 ,A : str=36 ,A : List[Any]="gelu" ,A : str=0.0 ,A : str=0.0 ,A : List[str]=512 ,A : Dict=16 ,A : int=2 ,A : int=0.0_2 ,A : List[Any]=3 ,A : Dict=4 ,A : int=None ,):
'''simple docstring'''
UpperCAmelCase__ : Any = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Tuple = seq_length
UpperCAmelCase__ : List[Any] = is_training
UpperCAmelCase__ : str = use_input_mask
UpperCAmelCase__ : str = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Tuple = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : Union[str, Any] = intermediate_size
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase__ : int = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Optional[Any] = type_vocab_size
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Tuple = initializer_range
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Tuple = num_choices
UpperCAmelCase__ : List[Any] = scope
def __lowercase ( self : str ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self : str ):
'''simple docstring'''
        return MraConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
        config = self.get_config()
        config.vocab_size = 300
return config
def __lowercase ( self : Dict ):
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowercase ( self : List[Any] ,A : List[Any] ,A : Any ,A : List[Any] ,A : List[str] ,A : int ,A : Dict ,A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = MraModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ : str = model(A ,attention_mask=A ,token_type_ids=A )
UpperCAmelCase__ : Union[str, Any] = model(A ,token_type_ids=A )
UpperCAmelCase__ : Any = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Dict ,A : Union[str, Any] ,A : Any ,A : Any ,A : int ,A : Union[str, Any] ,A : List[str] ,A : int ,A : int ,A : Any ,):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : List[Any] = MraModel(A )
model.to(A )
model.eval()
UpperCAmelCase__ : Optional[int] = model(
A ,attention_mask=A ,token_type_ids=A ,encoder_hidden_states=A ,encoder_attention_mask=A ,)
UpperCAmelCase__ : Optional[int] = model(
A ,attention_mask=A ,token_type_ids=A ,encoder_hidden_states=A ,)
UpperCAmelCase__ : int = model(A ,attention_mask=A ,token_type_ids=A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Union[str, Any] ,A : List[Any] ,A : Tuple ,A : Dict ,A : List[Any] ,A : Any ,A : Any ,A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = MraForMaskedLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ : int = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : Any ,A : int ,A : int ,A : Any ,A : List[Any] ,A : Any ,A : Tuple ,A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = MraForQuestionAnswering(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ : Dict = model(
A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowercase ( self : List[Any] ,A : List[Any] ,A : int ,A : str ,A : Any ,A : int ,A : str ,A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : Any = MraForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : List[str] ,A : Optional[int] ,A : List[Any] ,A : str ,A : Optional[int] ,A : int ,A : Union[str, Any] ,A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : Any = MraForTokenClassification(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ : List[str] = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : Optional[Any] ,A : Tuple ,A : Tuple ,A : Tuple ,A : int ,A : Optional[int] ,A : int ,A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_choices
UpperCAmelCase__ : List[str] = MraForMultipleChoice(config=A )
model.to(A )
model.eval()
UpperCAmelCase__ : str = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase__ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase__ : Any = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
UpperCAmelCase__ : str = model(
A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( ModelTesterMixin , unittest.TestCase ):
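    # The tuple below lists the MRA heads covered by the shared ModelTesterMixin
    # suite; the False flags opt this suite out of common checks that the
    # architecture does not support.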
snake_case_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = ()
    def __lowercase ( self : Optional[Any] ):
        '''simple docstring'''
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=MraConfig ,hidden_size=37 )
    def __lowercase ( self : Any ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def __lowercase ( self : Optional[int] ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def __lowercase ( self : Tuple ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def __lowercase ( self : Dict ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def __lowercase ( self : Any ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def __lowercase ( self : Tuple ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def __lowercase ( self : int ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def __lowercase ( self : List[Any] ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def __lowercase ( self : int ):
        '''simple docstring'''
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason="""MRA does not output attentions""" )
    def __lowercase ( self : Union[str, Any] ):
        '''simple docstring'''
        return
@require_torch
class __lowercase ( unittest.TestCase ):
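    # Integration checks: run the pretrained checkpoints on a fixed input and
    # compare a 3x3 slice of the output against recorded reference values.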
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
        model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,expected_slice ,atol=1e-4 ) )
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,expected_slice ,atol=1e-4 ) )
@slow
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
        input_ids = torch.arange(4_096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,expected_slice ,atol=1e-4 ) )
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into a lower and an
    upper triangular matrix (no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            upper[i][j] = table[i][j] - total
    return lower, upper
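# A minimal usage sketch (the matrix values are illustrative, not from the
# original file): factor a 3x3 matrix and check that the product of the
# triangular factors reproduces the input.
def _demo_lower_upper() -> None:
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)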
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A ( unittest.TestCase ):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
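    # The shared pipeline tests iterate over every causal-LM architecture
    # registered in these mappings, for both the PyTorch and TensorFlow backends.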
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
_a = text_generator("This is a test" , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
_a = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
snake_case_ , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
_a = text_generator("This is a test" , do_sample=snake_case_ , num_return_sequences=2 , return_tensors=snake_case_ )
self.assertEqual(
snake_case_ , [
{"generated_token_ids": ANY(snake_case_ )},
{"generated_token_ids": ANY(snake_case_ )},
] , )
_a = text_generator.model.config.eos_token_id
_a = "<pad>"
_a = text_generator(
["This is a test", "This is a second test"] , do_sample=snake_case_ , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case_ , )
self.assertEqual(
snake_case_ , [
[
{"generated_token_ids": ANY(snake_case_ )},
{"generated_token_ids": ANY(snake_case_ )},
],
[
{"generated_token_ids": ANY(snake_case_ )},
{"generated_token_ids": ANY(snake_case_ )},
],
] , )
@require_tf
def __lowerCAmelCase ( self ) -> Any:
_a = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
_a = text_generator("This is a test" , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
_a = text_generator(["This is a test", "This is a second test"] , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> str:
_a = TextGenerationPipeline(model=snake_case_ , tokenizer=snake_case_ )
return text_generator, ["This is a test", "Another test"]
def __lowerCAmelCase ( self ) -> Dict:
_a = "Hello I believe in"
_a = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
_a = text_generator(snake_case_ )
self.assertEqual(
snake_case_ , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
_a = text_generator(snake_case_ , stop_sequence=" fe" )
self.assertEqual(snake_case_ , [{"generated_text": "Hello I believe in fe"}] )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> List[Any]:
_a = text_generator.model
_a = text_generator.tokenizer
_a = text_generator("This is a test" )
self.assertEqual(snake_case_ , [{"generated_text": ANY(snake_case_ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_a = text_generator("This is a test" , return_full_text=snake_case_ )
self.assertEqual(snake_case_ , [{"generated_text": ANY(snake_case_ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_a = pipeline(task="text-generation" , model=snake_case_ , tokenizer=snake_case_ , return_full_text=snake_case_ )
_a = text_generator("This is a test" )
self.assertEqual(snake_case_ , [{"generated_text": ANY(snake_case_ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
_a = text_generator("This is a test" , return_full_text=snake_case_ )
self.assertEqual(snake_case_ , [{"generated_text": ANY(snake_case_ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
_a = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
[{"generated_text": ANY(snake_case_ )}, {"generated_text": ANY(snake_case_ )}],
[{"generated_text": ANY(snake_case_ )}, {"generated_text": ANY(snake_case_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_a = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case_ )
self.assertEqual(
snake_case_ , [
[{"generated_text": ANY(snake_case_ )}, {"generated_text": ANY(snake_case_ )}],
[{"generated_text": ANY(snake_case_ )}, {"generated_text": ANY(snake_case_ )}],
] , )
with self.assertRaises(snake_case_ ):
_a = text_generator("test" , return_full_text=snake_case_ , return_text=snake_case_ )
with self.assertRaises(snake_case_ ):
_a = text_generator("test" , return_full_text=snake_case_ , return_tensors=snake_case_ )
with self.assertRaises(snake_case_ ):
_a = text_generator("test" , return_text=snake_case_ , return_tensors=snake_case_ )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_a = text_generator("" )
self.assertEqual(snake_case_ , [{"generated_text": ANY(snake_case_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_a = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_a = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 5_0_0 , max_new_tokens=2_0 )
_a = text_generator("This is a test" * 5_0_0 , handle_long_generation="hole" , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(snake_case_ ):
text_generator(
"This is a test" * 5_0_0 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self ) -> Optional[int]:
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        outputs = pipe("This is a test" )
        self.assertEqual(
            outputs , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        outputs = pipe("This is a test" )
        self.assertEqual(
            outputs , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
        outputs = pipe("This is a test" )
        self.assertEqual(
            outputs , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def __lowerCAmelCase ( self ) -> str:
import torch
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.float16 )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self ) -> Optional[int]:
import torch
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.float16 )
        pipe("This is a test" , do_sample=True , top_p=0.5 )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = "Hello world"
_a = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
_a = logging.get_logger("transformers.generation.tf_utils" )
else:
_a = logging.get_logger("transformers.generation.utils" )
_a = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(snake_case_ ) as cl:
_a = text_generator(snake_case_ , max_length=1_0 , max_new_tokens=1 )
self.assertIn(snake_case_ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(snake_case_ ) as cl:
_a = text_generator(snake_case_ , max_new_tokens=1 )
self.assertNotIn(snake_case_ , cl.out )
with CaptureLogger(snake_case_ ) as cl:
_a = text_generator(snake_case_ , max_length=1_0 )
self.assertNotIn(snake_case_ , cl.out )
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ):  # picklable for multiprocessing
    return x.sum()
def add_one(i ):  # picklable for multiprocessing
    return i + 1
@dataclass
class A :
    x: int
    y: str
class PyUtilsTest ( TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
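        # The same expectations must hold when map_nested fans the work out to a
        # multiprocessing pool via num_proc.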
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
        class Foo :
            my_attr = """bar"""
        foo = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowercase ( iterable_length, num_proc, expected_num_proc ):
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {F'''{i}''': i for i in range(iterable_length )}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest ( TestCase ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            random_input = tf.random.uniform((1, 3) )
            return model(random_input ).numpy()
        with temp_seed(4_2 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(4_2 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            random_input = torch.rand(1 , 3 )
            return model(random_input ).detach().numpy()
        with temp_seed(4_2 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(4_2 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
        with temp_seed(4_2 ):
            out1 = gen_random_output()
        with temp_seed(4_2 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def _lowercase ( input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def _lowercase ( data, expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def _lowercase ( ):
    dataclass_instance = A(x=1, y="foobar" )
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(dataclass_instance ) == expected_output
    nested = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(nested ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10, y="foo" )] )
def _split_text(text ):
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def _lowercase ( ):
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
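# The tests below run the pipeline with jit=True, i.e. pmap-compiled across all
# local devices: params are replicated to each device and inputs are sharded so
# every device generates one sample.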
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase ( self ):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-canny''' , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["controlnet"] = controlnet_params
        prompts = '''bird'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        canny_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(f"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-openpose''' , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["controlnet"] = controlnet_params
        prompts = '''Chef in the kitchen'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        pose_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(f"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase):
    lowerCAmelCase_ = StableDiffusionControlNetImg2ImgPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""})
lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
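        # Each component below is a miniature, randomly initialised stand-in for
        # the full Stable Diffusion + ControlNet stack, so the test runs quickly
        # on CPU.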
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def UpperCAmelCase_ ( self , device , seed=0 )-> int:
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , unittest.TestCase):
    lowerCAmelCase_ = StableDiffusionControlNetImg2ImgPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase_ = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def UpperCAmelCase_ ( self , device , seed=0 )-> str:
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1e-3
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(A_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
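        # End-to-end check: the generated image must stay close to a reference
        # image produced offline with the same seed and settings.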
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='np' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
        assert np.abs(expected_image - image ).max() < 9e-2
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Any = logging.get_logger(__name__)
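# The helpers below translate timm's ViT-hybrid checkpoint keys into the
# transformers ViTHybrid layout: stem and BiT backbone first, then the
# transformer encoder, then layernorm plus head.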
def create_rename_keys(config , base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token'))
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings'))
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'))
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'))
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'))
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'))
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight'''))
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias'''))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias'''))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
])
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key(dct , old , new ):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False):
    backbone_config = BitConfig(
        global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000)
    base_model = False
# load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config , base_model)
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest)
    read_in_q_k_v(state_dict , config , base_model)
UpperCamelCase = 'huggingface/label-files'
UpperCamelCase = 'imagenet-1k-id2label.json'
UpperCamelCase = json.load(open(hf_hub_download(A , A , repo_type='dataset') , 'r'))
UpperCamelCase = {int(A): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
UpperCamelCase = ViTHybridModel(A).eval()
else:
UpperCamelCase = ViTHybridForImageClassification(A).eval()
model.load_state_dict(A)
# create image processor
UpperCamelCase = create_transform(**resolve_data_config({} , model=A))
UpperCamelCase = transform.transforms
UpperCamelCase = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
UpperCamelCase = ViTHybridImageProcessor(
do_resize=A , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=A , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCamelCase = prepare_img()
UpperCamelCase = transform(A).unsqueeze(0)
UpperCamelCase = processor(A , return_tensors='pt').pixel_values
# verify pixel values
assert torch.allclose(A , A)
# verify logits
with torch.no_grad():
UpperCamelCase = model(A)
UpperCamelCase = outputs.logits
print('Predicted class:' , logits.argmax(-1).item())
if base_model:
UpperCamelCase = timm_model.forward_features(A)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(A , outputs.pooler_output , atol=1E-3)
else:
UpperCamelCase = timm_model(A)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A , outputs.logits , atol=1E-3)
print('Looks ok!')
if pytorch_dump_folder_path is not None:
Path(A).mkdir(exist_ok=A)
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''')
model.save_pretrained(A)
print(f'''Saving processor to {pytorch_dump_folder_path}''')
processor.save_pretrained(A)
if push_to_hub:
print(f'''Pushing model and processor to the hub {vit_name}''')
model.push_to_hub(f'''ybelkada/{vit_name}''')
processor.push_to_hub(f'''ybelkada/{vit_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
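# Example invocation for the conversion script above (a hedged sketch: the
# output directory is a hypothetical placeholder, not a path from the source):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit_hybrid_converted \
#       --push_to_hub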
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
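# Minimal usage sketch for the divisor-sum helper above. The expected values
# are easy to verify by hand: the proper divisors of 28 are 1, 2, 4, 7 and 14,
# which sum to 28 (a perfect number), and the only proper divisor of 7 is 1.
#
#   assert sum_of_divisors(28) == 28
#   assert sum_of_divisors(7) == 1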
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
    pt_model.base_model_prefix = ""
    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )
        flax_key = ".".join(flax_key_tuple_array)
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    return pt_model
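# Hedged usage sketch for the loader above. The checkpoint path is a
# hypothetical placeholder; any torch.nn.Module whose state_dict layout matches
# the serialized Flax parameters can be passed as `pt_model`.
#
#   pt_model = MyPyTorchModel()  # hypothetical model with matching weights
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "./flax_model.msgpack")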
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
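# Migration sketch for the deprecation shim above (the checkpoint name is an
# illustrative example only). Because the feature extractor subclasses the
# image processor, both lines below behave identically; the second avoids the
# FutureWarning:
#
#   feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/mobilevit-small")
#   image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")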
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
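# Example summarization run, mirroring the MT usage above (the data and output
# paths are hypothetical placeholders):
#
#   python run_eval.py facebook/bart-large-cnn cnn_dm/test.source dbart/test_generations.txt \
#       --reference_path cnn_dm/test.target --score_path dbart/rouge.json --task summarization --bs 32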
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
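# Behavioural sketch of the lazy module above: outside of TYPE_CHECKING, the
# package module is swapped for a _LazyModule, so heavy submodules are imported
# only when first accessed (assumes transformers is installed with the
# relevant torch/vision extras):
#
#   from transformers import DPTConfig          # cheap: configuration only
#   from transformers import DPTImageProcessor  # first touch triggers the vision import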
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''')
    print(f'''The numpy value of pi is {pi}''')
    print(f'''The total error is {abs(pi - pi_estimate)}''')
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
):
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    def identity_function(x: float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print('******************' )
def pi_estimator_using_area_under_curve(iterations: int):
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
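# Quick usage sketch: each estimator tightens at the usual O(1/sqrt(n)) Monte
# Carlo rate; the sample counts below are arbitrary illustrative choices.
#
#   pi_estimator(1_000_000)                       # ~3.14, error on the order of 1e-3
#   area_under_line_estimator_check(100_000)      # integral of y=x on [0, 1] is 0.5
#   pi_estimator_using_area_under_curve(100_000)  # integral of sqrt(4 - x^2) on [0, 2] is pi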
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy
    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
if path:
for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
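# Note on the design above: unlike A*, this greedy best-first search orders the
# open list purely by the Manhattan heuristic (f_cost == h), so the returned
# path is fast to find but not guaranteed shortest. A hedged sketch of the A*
# variant would only change the node ordering:
#
#   # self.f_cost = self.g_cost + self.calculate_heuristic()  # A*-style ordering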
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)
    def __bool__(self):
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)
    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)
    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
    def __len__(self) -> int:
        return self._len
    def __iter__(self):
        yield from (item.key for item in self._buckets if item)
    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
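# Minimal usage sketch for the open-addressing map above (keys and values are
# arbitrary examples). Deletion leaves a tombstone so probe chains stay intact:
#
#   hm: HashMap[str, int] = HashMap()
#   hm["one"] = 1
#   hm["two"] = 2
#   del hm["one"]
#   assert "one" not in hm and hm["two"] == 2 and len(hm) == 1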
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    # the destination module paths follow diffusers' T5-style encoder blocks (assumed layout)
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    # same T5-style block layout as the notes encoder (assumed)
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    # destination paths assume diffusers' T5FilmDecoder layout (self-attn, cross-attn, FiLM-conditioned FF)
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )
    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )
        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )
        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    decoder = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
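# Example invocation (hedged: the checkpoint directory is a placeholder and is
# expected to contain a t5x checkpoint with its config.gin one level up):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion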
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"""Task {task} not supported.""")
    print(f"""Building PyTorch model from configuration: {config}""")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt', model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print('Used relative position embeddings:', model.config.reset_position_index_per_cell)
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
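# Hedged usage sketch (added; not part of the original script): once the
# conversion has run, the dumped model and tokenizer can be reloaded from the
# same directory with from_pretrained.
def _example_reload_converted_model(pytorch_dump_path):
    from transformers import TapasForQuestionAnswering, TapasTokenizer

    # both the model weights/config and the tokenizer files were saved above
    model = TapasForQuestionAnswering.from_pretrained(pytorch_dump_path)
    tokenizer = TapasTokenizer.from_pretrained(pytorch_dump_path)
    return model, tokenizer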
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 688
| 0
|
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCAmelCase : Tuple = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class __magic_name__ ( tr.AbstractTransform ):
'''simple docstring'''
def __init__( self , _a = " " ):
"""simple docstring"""
lowerCamelCase = sentence_delimiter
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return list(_a )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = []
for sent_idx, sentence in enumerate(_a ):
chars.extend(self.process_string(_a ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(_a ) - 1:
chars.append(self.sentence_delimiter )
return chars
lowerCAmelCase : List[str] = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
lowerCAmelCase : Tuple = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowerCAmelCase : Any = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
lowerCAmelCase : Union[str, Any] = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
lowerCAmelCase : Dict = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def _lowerCAmelCase ( self , _a , _a , _a=False ):
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
_a , _a , truth_transform=_a , hypothesis_transform=_a , )["wer"]
lowerCamelCase = 0
lowerCamelCase = 0
for prediction, reference in zip(_a , _a ):
lowerCamelCase = jiwer.compute_measures(
_a , _a , truth_transform=_a , hypothesis_transform=_a , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
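# Hedged illustration (added): CER is the character-level Levenshtein distance
# divided by the reference length, i.e. (S + D + I) / N. A self-contained
# single-pair version of that formula (reference must be non-empty):
def _character_error_rate(prediction, reference):
    m, n = len(reference), len(prediction)
    # dp[i][j] = edit distance between reference[:i] and prediction[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i  # delete the remaining reference characters
    for j in range(n + 1):
        dp[0][j] = j  # insert the remaining prediction characters
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if reference[i - 1] == prediction[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,  # deletion
                dp[i][j - 1] + 1,  # insertion
                dp[i - 1][j - 1] + cost,  # substitution (or match)
            )
    return dp[m][n] / m

# The docstring example above corresponds to 14 character edits over 41 reference
# characters across the two pairs, and 14 / 41 = 0.34146341463414637.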
| 543
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = DiTPipeline
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
def _lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_a , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=_a , )
lowerCamelCase = AutoencoderKL()
lowerCamelCase = DDIMScheduler()
lowerCamelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def _lowerCAmelCase ( self , _a , _a=0 ):
"""simple docstring"""
if str(_a ).startswith("""mps""" ):
lowerCamelCase = torch.manual_seed(_a )
else:
lowerCamelCase = torch.Generator(device=_a ).manual_seed(_a )
lowerCamelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = """cpu"""
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
lowerCamelCase = self.get_dummy_inputs(_a )
lowerCamelCase = pipe(**_a ).images
lowerCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowerCamelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowerCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=_a , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
lowerCamelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
lowerCamelCase = pipe.get_label_ids(_a )
lowerCamelCase = pipe(_a , generator=_a , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(_a , _a ):
lowerCamelCase = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1e-2
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
lowerCamelCase = ["""vase""", """umbrella"""]
lowerCamelCase = pipe.get_label_ids(_a )
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = pipe(_a , generator=_a , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(_a , _a ):
lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1e-1
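# Hedged usage sketch (added; mirrors the slow tests above). It needs a GPU and
# a model download, so it is wrapped in a function instead of running at import:
def _dit_pipeline_example():
    import torch
    from diffusers import DiTPipeline, DPMSolverMultistepScheduler

    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.to("cuda")
    class_ids = pipe.get_label_ids(["white shark"])
    generator = torch.manual_seed(0)
    return pipe(class_labels=class_ids, generator=generator, num_inference_steps=25, output_type="np").images[0]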
| 543
| 1
|
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __magic_name__ ( lowerCAmelCase ):
def __get__( self , snake_case , snake_case=None) -> Dict:
'''simple docstring'''
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute')
_UpperCAmelCase : Optional[int] ='__cached_' + self.fget.__name__
_UpperCAmelCase : List[str] =getattr(snake_case , snake_case , snake_case)
if cached is None:
_UpperCAmelCase : Any =self.fget(snake_case)
setattr(snake_case , snake_case , snake_case)
return cached
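# Illustration (added): the descriptor above mirrors functools.cached_property
# from the standard library -- the wrapped getter runs once per instance, and
# the result is stored on the object (here under a "__cached_<name>" attribute).
def _cached_property_example():
    import functools

    class Square:
        def __init__(self, n):
            self.n = n

        @functools.cached_property
        def value(self):
            print("computing")  # printed only on the first access
            return self.n * self.n

    s = Square(4)
    return s.value + s.value  # "computing" is printed once; returns 32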
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"invalid truth value {val!r}" )
def lowerCamelCase__ ( __lowerCamelCase : int ):
'''simple docstring'''
if is_torch_fx_proxy(__lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(__lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(__lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(__lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(__lowerCamelCase , np.ndarray )
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
'''simple docstring'''
return isinstance(__lowerCamelCase , np.ndarray )
def lowerCamelCase__ ( __lowerCamelCase : List[Any] ):
'''simple docstring'''
return _is_numpy(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : str ):
'''simple docstring'''
import torch
return isinstance(__lowerCamelCase , torch.Tensor )
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : List[str] ):
'''simple docstring'''
import torch
return isinstance(__lowerCamelCase , torch.device )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
import torch
    if isinstance(__lowerCamelCase , str ):
        if hasattr(torch , __lowerCamelCase ):
            __lowerCamelCase =getattr(torch , __lowerCamelCase )
else:
return False
return isinstance(__lowerCamelCase , torch.dtype )
def lowerCamelCase__ ( __lowerCamelCase : List[str] ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : List[Any] ):
'''simple docstring'''
import tensorflow as tf
return isinstance(__lowerCamelCase , tf.Tensor )
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : List[Any] ):
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(__lowerCamelCase , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(__lowerCamelCase )
return type(__lowerCamelCase ) == tf.Tensor
def lowerCamelCase__ ( __lowerCamelCase : int ):
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(__lowerCamelCase , jnp.ndarray )
def lowerCamelCase__ ( __lowerCamelCase : int ):
'''simple docstring'''
return False if not is_flax_available() else _is_jax(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
if isinstance(__lowerCamelCase , (dict, UserDict) ):
return {k: to_py_obj(__lowerCamelCase ) for k, v in obj.items()}
elif isinstance(__lowerCamelCase , (list, tuple) ):
return [to_py_obj(__lowerCamelCase ) for o in obj]
elif is_tf_tensor(__lowerCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(__lowerCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(__lowerCamelCase ):
return np.asarray(__lowerCamelCase ).tolist()
elif isinstance(__lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def lowerCamelCase__ ( __lowerCamelCase : int ):
'''simple docstring'''
if isinstance(__lowerCamelCase , (dict, UserDict) ):
return {k: to_numpy(__lowerCamelCase ) for k, v in obj.items()}
elif isinstance(__lowerCamelCase , (list, tuple) ):
return np.array(__lowerCamelCase )
elif is_tf_tensor(__lowerCamelCase ):
return obj.numpy()
elif is_torch_tensor(__lowerCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(__lowerCamelCase ):
return np.asarray(__lowerCamelCase )
else:
return obj
class __magic_name__ ( lowerCAmelCase ):
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCAmelCase : Tuple =fields(self)
# Safety and consistency checks
if not len(snake_case):
raise ValueError(f"{self.__class__.__name__} has no fields.")
if not all(field.default is None for field in class_fields[1:]):
raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
_UpperCAmelCase : int =getattr(self , class_fields[0].name)
_UpperCAmelCase : Any =all(getattr(self , field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(snake_case):
if isinstance(snake_case , snake_case):
_UpperCAmelCase : Any =first_field.items()
_UpperCAmelCase : Dict =True
else:
try:
_UpperCAmelCase : str =iter(snake_case)
_UpperCAmelCase : Optional[Any] =True
except TypeError:
_UpperCAmelCase : Dict =False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(snake_case):
if (
not isinstance(snake_case , (list, tuple))
or not len(snake_case) == 2
or not isinstance(element[0] , snake_case)
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
_UpperCAmelCase : Any =first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
break
setattr(self , element[0] , element[1])
if element[1] is not None:
_UpperCAmelCase : List[Any] =element[1]
elif first_field is not None:
_UpperCAmelCase : List[Any] =first_field
else:
for field in class_fields:
_UpperCAmelCase : int =getattr(self , field.name)
if v is not None:
_UpperCAmelCase : Optional[Any] =v
def __delitem__( self , *snake_case , **snake_case) -> Any:
'''simple docstring'''
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
def lowerCAmelCase ( self , *snake_case , **snake_case) -> Any:
'''simple docstring'''
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
def lowerCAmelCase ( self , *snake_case , **snake_case) -> Tuple:
'''simple docstring'''
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
def lowerCAmelCase ( self , *snake_case , **snake_case) -> str:
'''simple docstring'''
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
def __getitem__( self , snake_case) -> str:
'''simple docstring'''
if isinstance(snake_case , snake_case):
_UpperCAmelCase : Optional[Any] =dict(self.items())
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , snake_case , snake_case) -> str:
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(snake_case , snake_case)
super().__setattr__(snake_case , snake_case)
def __setitem__( self , snake_case , snake_case) -> List[str]:
'''simple docstring'''
# Will raise a KeyException if needed
super().__setitem__(snake_case , snake_case)
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(snake_case , snake_case)
def lowerCAmelCase ( self) -> Tuple[Any]:
'''simple docstring'''
return tuple(self[k] for k in self.keys())
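# Hedged usage sketch (added): this class is the obfuscated ModelOutput base from
# transformers; assuming that upstream class, subclasses act both as dataclasses
# and as ordered dicts, and to_tuple() keeps only the fields that were set.
def _model_output_usage_example():
    from dataclasses import dataclass
    from typing import Optional

    import torch
    from transformers.utils import ModelOutput

    @dataclass
    class ToyOutput(ModelOutput):
        logits: torch.FloatTensor = None
        loss: Optional[torch.FloatTensor] = None

    out = ToyOutput(logits=torch.ones(2))
    assert out["logits"] is out.logits  # dict-style and attribute access agree
    assert out.to_tuple()[0] is out.logits  # loss is None, so it is dropped
    return out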
class __magic_name__ ( lowerCAmelCase ,lowerCAmelCase ):
@classmethod
def lowerCAmelCase ( cls , snake_case) -> List[Any]:
'''simple docstring'''
raise ValueError(
f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys())}")
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="longest"
UpperCAmelCase ="max_length"
UpperCAmelCase ="do_not_pad"
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="pt"
UpperCAmelCase ="tf"
UpperCAmelCase ="np"
UpperCAmelCase ="jax"
class __magic_name__ :
def __init__( self , snake_case) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =context_managers
_UpperCAmelCase : str =ExitStack()
def __enter__( self) -> Dict:
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(snake_case)
def __exit__( self , *snake_case , **snake_case) -> Dict:
'''simple docstring'''
self.stack.__exit__(*snake_case , **snake_case)
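# Illustration (added): the helper above is a thin wrapper over contextlib.ExitStack,
# letting a whole list of context managers be entered and exited together.
# Equivalent standard-library usage:
def _context_managers_example(paths):
    from contextlib import ExitStack

    with ExitStack() as stack:
        files = [stack.enter_context(open(p)) for p in paths]
        # every file is open here; all of them are closed when the block exits
        return [f.readline() for f in files]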
def lowerCamelCase__ ( __lowerCamelCase : Tuple ):
'''simple docstring'''
_UpperCAmelCase : Tuple =infer_framework(__lowerCamelCase )
if framework == "tf":
_UpperCAmelCase : List[str] =inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
_UpperCAmelCase : Optional[Any] =inspect.signature(model_class.forward ) # PyTorch models
else:
_UpperCAmelCase : Optional[Any] =inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =model_class.__name__
_UpperCAmelCase : Union[str, Any] =infer_framework(__lowerCamelCase )
if framework == "tf":
_UpperCAmelCase : Dict =inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
_UpperCAmelCase : str =inspect.signature(model_class.forward ) # PyTorch models
else:
_UpperCAmelCase : Optional[Any] =inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def lowerCamelCase__ ( d : MutableMapping , parent_key : str = "" , delimiter : str = "." ):
    '''simple docstring'''
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from _flatten_dict(v , key , delimiter )
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
@contextmanager
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : bool = False ):
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowerCamelCase__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Any=None ):
'''simple docstring'''
if is_numpy_array(__lowerCamelCase ):
return np.transpose(__lowerCamelCase , axes=__lowerCamelCase )
elif is_torch_tensor(__lowerCamelCase ):
return array.T if axes is None else array.permute(*__lowerCamelCase )
elif is_tf_tensor(__lowerCamelCase ):
import tensorflow as tf
return tf.transpose(__lowerCamelCase , perm=__lowerCamelCase )
elif is_jax_tensor(__lowerCamelCase ):
return jnp.transpose(__lowerCamelCase , axes=__lowerCamelCase )
else:
raise ValueError(f"Type not supported for transpose: {type(__lowerCamelCase )}." )
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Dict ):
'''simple docstring'''
if is_numpy_array(__lowerCamelCase ):
return np.reshape(__lowerCamelCase , __lowerCamelCase )
elif is_torch_tensor(__lowerCamelCase ):
return array.reshape(*__lowerCamelCase )
elif is_tf_tensor(__lowerCamelCase ):
import tensorflow as tf
return tf.reshape(__lowerCamelCase , __lowerCamelCase )
elif is_jax_tensor(__lowerCamelCase ):
return jnp.reshape(__lowerCamelCase , __lowerCamelCase )
else:
raise ValueError(f"Type not supported for reshape: {type(__lowerCamelCase )}." )
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Any=None ):
'''simple docstring'''
if is_numpy_array(__lowerCamelCase ):
return np.squeeze(__lowerCamelCase , axis=__lowerCamelCase )
elif is_torch_tensor(__lowerCamelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=__lowerCamelCase )
elif is_tf_tensor(__lowerCamelCase ):
import tensorflow as tf
return tf.squeeze(__lowerCamelCase , axis=__lowerCamelCase )
elif is_jax_tensor(__lowerCamelCase ):
return jnp.squeeze(__lowerCamelCase , axis=__lowerCamelCase )
else:
raise ValueError(f"Type not supported for squeeze: {type(__lowerCamelCase )}." )
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
if is_numpy_array(__lowerCamelCase ):
return np.expand_dims(__lowerCamelCase , __lowerCamelCase )
elif is_torch_tensor(__lowerCamelCase ):
return array.unsqueeze(dim=__lowerCamelCase )
elif is_tf_tensor(__lowerCamelCase ):
import tensorflow as tf
return tf.expand_dims(__lowerCamelCase , axis=__lowerCamelCase )
elif is_jax_tensor(__lowerCamelCase ):
return jnp.expand_dims(__lowerCamelCase , axis=__lowerCamelCase )
else:
raise ValueError(f"Type not supported for expand_dims: {type(__lowerCamelCase )}." )
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if is_numpy_array(__lowerCamelCase ):
return np.size(__lowerCamelCase )
elif is_torch_tensor(__lowerCamelCase ):
return array.numel()
elif is_tf_tensor(__lowerCamelCase ):
import tensorflow as tf
return tf.size(__lowerCamelCase )
elif is_jax_tensor(__lowerCamelCase ):
return array.size
else:
raise ValueError(f"Type not supported for expand_dims: {type(__lowerCamelCase )}." )
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : str ):
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(__lowerCamelCase , (tuple, list) ):
_UpperCAmelCase : Union[str, Any] =[f"{repo_id}--{v}" if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
_UpperCAmelCase : Optional[Any] =f"{repo_id}--{value}"
return auto_map
def lowerCamelCase__ ( __lowerCamelCase : str ):
'''simple docstring'''
for base_class in inspect.getmro(__lowerCamelCase ):
_UpperCAmelCase : str =base_class.__module__
_UpperCAmelCase : Dict =base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"Could not infer framework from class {model_class}." )
| 331
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCamelCase__ ( ):
'''simple docstring'''
_UpperCAmelCase : int ='https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
_UpperCAmelCase : Optional[int] =Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert('RGB' )
return image
def lowerCamelCase__ ( __lowerCamelCase : Any ):
'''simple docstring'''
_UpperCAmelCase : int =[]
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =dct.pop(__lowerCamelCase )
_UpperCAmelCase : Dict =val
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_UpperCAmelCase : str =state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
_UpperCAmelCase : Tuple =state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
_UpperCAmelCase : Optional[Any] =torch.cat((q_bias, torch.zeros_like(__lowerCamelCase , requires_grad=__lowerCamelCase ), v_bias) )
_UpperCAmelCase : int =qkv_bias
def lowerCamelCase__ ( __lowerCamelCase : List[str] , __lowerCamelCase : int ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =3_6_4 if 'coco' in model_name else 2_2_4
_UpperCAmelCase : Optional[int] =BlipaVisionConfig(image_size=__lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_UpperCAmelCase : Tuple =OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=__lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
_UpperCAmelCase : Dict =OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=__lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
_UpperCAmelCase : Union[str, Any] =TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_UpperCAmelCase : Dict =TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
_UpperCAmelCase : str =BlipaConfig(vision_config=__lowerCamelCase , text_config=__lowerCamelCase )
return config, image_size
@torch.no_grad()
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=None , __lowerCamelCase : Dict=False ):
'''simple docstring'''
_UpperCAmelCase : str =(
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
_UpperCAmelCase : Tuple =tokenizer('\n' , add_special_tokens=__lowerCamelCase ).input_ids[0]
_UpperCAmelCase , _UpperCAmelCase : List[str] =get_blipa_config(__lowerCamelCase , eos_token_id=__lowerCamelCase )
_UpperCAmelCase : Optional[int] =BlipaForConditionalGeneration(__lowerCamelCase ).eval()
_UpperCAmelCase : int ={
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
_UpperCAmelCase , _UpperCAmelCase : Tuple =model_name_to_original[model_name]
# load original model
print('Loading original model...' )
_UpperCAmelCase : Optional[int] ='cuda' if torch.cuda.is_available() else 'cpu'
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] =load_model_and_preprocess(
name=__lowerCamelCase , model_type=__lowerCamelCase , is_eval=__lowerCamelCase , device=__lowerCamelCase )
original_model.eval()
print('Done!' )
# update state dict keys
_UpperCAmelCase : List[Any] =original_model.state_dict()
_UpperCAmelCase : Optional[Any] =create_rename_keys(__lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_UpperCAmelCase : Optional[Any] =state_dict.pop(__lowerCamelCase )
if key.startswith('Qformer.bert' ):
_UpperCAmelCase : Tuple =key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
_UpperCAmelCase : Optional[Any] =key.replace('self' , 'attention' )
if "opt_proj" in key:
_UpperCAmelCase : List[str] =key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
_UpperCAmelCase : Tuple =key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
_UpperCAmelCase : Optional[Any] =key.replace('opt' , 'language' )
if key.startswith('t5' ):
_UpperCAmelCase : Dict =key.replace('t5' , 'language' )
_UpperCAmelCase : Any =val
# read in qv biases
read_in_q_v_bias(__lowerCamelCase , __lowerCamelCase )
_UpperCAmelCase , _UpperCAmelCase : List[Any] =hf_model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
assert len(__lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_UpperCAmelCase : Union[str, Any] =load_demo_image()
_UpperCAmelCase : str =vis_processors['eval'](__lowerCamelCase ).unsqueeze(0 ).to(__lowerCamelCase )
_UpperCAmelCase : Any =tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(__lowerCamelCase )
# create processor
_UpperCAmelCase : str =BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=__lowerCamelCase , image_std=__lowerCamelCase )
_UpperCAmelCase : Union[str, Any] =BlipaProcessor(image_processor=__lowerCamelCase , tokenizer=__lowerCamelCase )
_UpperCAmelCase : str =processor(images=__lowerCamelCase , return_tensors='pt' ).pixel_values.to(__lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowerCamelCase , __lowerCamelCase )
original_model.to(__lowerCamelCase )
hf_model.to(__lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
_UpperCAmelCase : Dict =original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
_UpperCAmelCase : int =hf_model(__lowerCamelCase , __lowerCamelCase ).logits
else:
_UpperCAmelCase : Tuple =original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
_UpperCAmelCase : Union[str, Any] =input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
_UpperCAmelCase : Any =hf_model(__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_UpperCAmelCase : Dict =torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=__lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_UpperCAmelCase : Optional[Any] =torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=__lowerCamelCase )
else:
# cast to same type
_UpperCAmelCase : List[str] =logits.dtype
assert torch.allclose(original_logits.to(__lowerCamelCase ) , __lowerCamelCase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
_UpperCAmelCase : str =''
_UpperCAmelCase : Tuple =tokenizer(__lowerCamelCase , return_tensors='pt' ).input_ids.to(__lowerCamelCase )
_UpperCAmelCase : Any =original_model.generate({'image': original_pixel_values} )
_UpperCAmelCase : List[str] =hf_model.generate(
__lowerCamelCase , __lowerCamelCase , do_sample=__lowerCamelCase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , __lowerCamelCase )
_UpperCAmelCase : List[Any] =input_ids.shape[1]
_UpperCAmelCase : Optional[int] =processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowerCamelCase )
_UpperCAmelCase : Optional[Any] =[text.strip() for text in output_text]
print('HF generation:' , __lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowerCamelCase )
hf_model.save_pretrained(__lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f"nielsr/{model_name}" )
hf_model.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
lowercase =argparse.ArgumentParser()
lowercase =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
        help='Name of the BLIP-2 model checkpoint to convert.',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
lowercase =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
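# Hedged usage sketch (added): after conversion, image captioning with the upstream
# Blip2 classes would look roughly like this (hub checkpoint name assumed):
def _blip2_caption_example():
    import requests
    from PIL import Image
    from transformers import Blip2ForConditionalGeneration, Blip2Processor

    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = Blip2Processor.from_pretrained('Salesforce/blip2-opt-2.7b')
    model = Blip2ForConditionalGeneration.from_pretrained('Salesforce/blip2-opt-2.7b')
    inputs = processor(images=image, return_tensors='pt')
    out = model.generate(**inputs, max_new_tokens=20)
    return processor.batch_decode(out, skip_special_tokens=True)[0].strip()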
| 331
| 1
|
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] ):
"""simple docstring"""
__UpperCAmelCase = len(UpperCamelCase__ )
print('''The following activities are selected:''' )
# The first activity is always selected
__UpperCAmelCase = 0
print(UpperCamelCase__ , end=''',''' )
# Consider rest of the activities
for j in range(UpperCamelCase__ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(UpperCamelCase__ , end=''',''' )
__UpperCAmelCase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[int] = [1, 3, 0, 5, 8, 5]
__lowerCAmelCase : Tuple = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
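# De-obfuscated reference sketch (added for clarity): the greedy rule described in
# the comments above, returning indices instead of printing them. It assumes the
# activities are already sorted by finish time, as in the sample data.
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always selected
    last_finish = finish[0]
    for j in range(1, len(finish)):
        # greedy step: take any activity that starts after the last one ends
        if start[j] >= last_finish:
            selected.append(j)
            last_finish = finish[j]
    return selected

# max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) -> [0, 1, 3, 4]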
| 262
|
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
a : Optional[Any] = tuple[int, int]
class lowercase:
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
a__ = vertices
a__ = {
(min(__SCREAMING_SNAKE_CASE ), max(__SCREAMING_SNAKE_CASE )): weight for edge, weight in edges.items()
}
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
a__ = weight
def lowercase__ ( self ) -> Graph:
"""simple docstring"""
a__ = Graph({min(self.vertices )} , {} )
a__ = 42
a__ = 42
a__ = 42
a__ = 42
while len(subgraph.vertices ) < len(self.vertices ):
a__ = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
a__ = edge
a__ = weight
subgraph.add_edge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return subgraph
def __magic_name__ ( UpperCamelCase : str = "p107_network.txt" ) -> int:
a__ = os.path.abspath(os.path.dirname(UpperCamelCase ) )
a__ = os.path.join(UpperCamelCase , UpperCamelCase )
a__ = {}
a__ = 42
a__ = 42
a__ = 42
with open(UpperCamelCase ) as f:
a__ = f.read().strip().split('\n' )
a__ = [line.split(',' ) for line in data]
for edgea in range(1 , len(UpperCamelCase ) ):
for edgea in range(UpperCamelCase ):
            if adjacency_matrix[edgea][edgea] != "-":
                a__ = int(adjacency_matrix[edgea][edgea] )
a__ = Graph(set(range(len(UpperCamelCase ) ) ) , UpperCamelCase )
a__ = graph.prims_algorithm()
a__ = sum(graph.edges.values() )
a__ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F'''{solution() = }''')
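# Hedged mini-example (added): the same construction as prims_algorithm above,
# written out for a triangle graph so the bookkeeping is visible.
def _prim_triangle_example() -> int:
    edges = {(0, 1): 1, (1, 2): 2, (0, 2): 3}
    in_tree = {0}
    mst_weight = 0
    while len(in_tree) < 3:
        # cheapest edge with exactly one endpoint inside the tree
        edge, weight = min(
            ((e, w) for e, w in edges.items() if (e[0] in in_tree) ^ (e[1] in in_tree)),
            key=lambda item: item[1],
        )
        in_tree.update(edge)
        mst_weight += weight
    return sum(edges.values()) - mst_weight  # saving over the full graph: 6 - 3 = 3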
| 273
| 0
|
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 714
|
'''simple docstring'''
A_ = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
A_ = ["a", "b", "c", "d", "e"]
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]:
lowerCamelCase_ = start
# add current to visited
visited.append(__UpperCamelCase )
lowerCamelCase_ = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
lowerCamelCase_ = topological_sort(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# if all neighbors visited add current to sort
sort.append(__UpperCamelCase )
    # if not all vertices have been visited, pick an unvisited one and continue
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
for vertice in vertices:
if vertice not in visited:
lowerCamelCase_ = topological_sort(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# return sort
return sort
if __name__ == "__main__":
A_ = topological_sort("a", [], [])
print(sort)
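# De-obfuscated reference sketch (added for clarity): the DFS post-order
# topological sort described in the comments above, made self-contained
# with its own copy of the sample graph.
def topo_sort_example() -> list[str]:
    graph = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
    visited: set[str] = set()
    order: list[str] = []

    def dfs(node: str) -> None:
        visited.add(node)
        for neighbor in graph[node]:
            if neighbor not in visited:
                dfs(neighbor)
        order.append(node)  # post-order: a node is emitted after its successors

    for vertex in graph:
        if vertex not in visited:
            dfs(vertex)
    # ["c", "d", "e", "b", "a"]; reversed, this is a valid topological order
    return order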
| 384
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['MobileViTFeatureExtractor']
__UpperCAmelCase = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 65
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class __lowercase ( __lowerCamelCase ):
snake_case_ = ["""input_features""", """attention_mask"""]
def __init__( self : Any ,A : str=80 ,A : Optional[int]=16_000 ,A : int=0.0 ,A : str=10 ,A : Any=25 ,A : str="hamming_window" ,A : int=3_2_7_6_8.0 ,A : List[str]=0.9_7 ,A : Optional[int]=1.0 ,A : Optional[Any]=True ,A : Tuple=True ,A : Any=False ,**A : int ,):
'''simple docstring'''
super().__init__(feature_size=A ,sampling_rate=A ,padding_value=A ,**A )
UpperCAmelCase__ : str = feature_size
UpperCAmelCase__ : int = sampling_rate
UpperCAmelCase__ : int = padding_value
UpperCAmelCase__ : Dict = hop_length
UpperCAmelCase__ : int = win_length
UpperCAmelCase__ : Dict = frame_signal_scale
UpperCAmelCase__ : Dict = preemphasis_coeff
UpperCAmelCase__ : str = mel_floor
UpperCAmelCase__ : Any = normalize_means
UpperCAmelCase__ : str = normalize_vars
UpperCAmelCase__ : int = win_function
UpperCAmelCase__ : List[Any] = return_attention_mask
UpperCAmelCase__ : str = win_length * sampling_rate // 1_000
UpperCAmelCase__ : List[Any] = hop_length * sampling_rate // 1_000
UpperCAmelCase__ : int = optimal_fft_length(self.sample_size )
UpperCAmelCase__ : List[Any] = (self.n_fft // 2) + 1
def __lowercase ( self : Union[str, Any] ,A : np.array ):
'''simple docstring'''
if self.win_function == "hamming_window":
UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=A )
else:
UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function )
UpperCAmelCase__ : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
UpperCAmelCase__ : Optional[Any] = spectrogram(
one_waveform * self.frame_signal_scale ,window=A ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=A ,preemphasis=self.preemphasis_coeff ,mel_filters=A ,mel_floor=self.mel_floor ,log_mel="""log""" ,)
return msfc_features.T
def __lowercase ( self : str ,A : Any ,A : Optional[int] ,A : str ):
'''simple docstring'''
# make sure we normalize float32 arrays
if self.normalize_means:
UpperCAmelCase__ : Optional[Any] = x[:input_length].mean(axis=0 )
UpperCAmelCase__ : Any = np.subtract(A ,A )
if self.normalize_vars:
UpperCAmelCase__ : str = x[:input_length].std(axis=0 )
UpperCAmelCase__ : Optional[int] = np.divide(A ,A )
if input_length < x.shape[0]:
UpperCAmelCase__ : int = padding_value
# make sure array is in float32
UpperCAmelCase__ : str = x.astype(np.floataa )
return x
def __lowercase ( self : Union[str, Any] ,A : List[np.ndarray] ,A : Optional[np.ndarray] = None ):
'''simple docstring'''
UpperCAmelCase__ : Any = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(A ,A ,self.padding_value ) for x, n in zip(A ,A )]
def __call__( self : Union[str, Any] ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : Union[bool, str, PaddingStrategy] = False ,A : Optional[int] = None ,A : bool = False ,A : Optional[int] = None ,A : Optional[bool] = None ,A : Optional[Union[str, TensorType]] = None ,A : Optional[int] = None ,**A : Tuple ,):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCAmelCase__ : Optional[Any] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
UpperCAmelCase__ : Any = is_batched_numpy or (
isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase__ : List[str] = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A ,np.ndarray ):
UpperCAmelCase__ : Union[str, Any] = np.asarray(A ,dtype=np.floataa )
elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase__ : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase__ : Optional[Any] = [raw_speech]
# extract fbank features
UpperCAmelCase__ : Tuple = [self._extract_mfsc_features(A ) for one_waveform in raw_speech]
# convert into correct format for padding
UpperCAmelCase__ : str = BatchFeature({"""input_features""": features} )
UpperCAmelCase__ : Optional[Any] = self.pad(
A ,padding=A ,max_length=A ,truncation=A ,pad_to_multiple_of=A ,return_attention_mask=A ,**A ,)
# make sure list is in array format
UpperCAmelCase__ : Tuple = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] ,A ):
UpperCAmelCase__ : Union[str, Any] = [np.asarray(A ,dtype=np.floataa ) for feature in input_features]
UpperCAmelCase__ : Dict = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
UpperCAmelCase__ : Union[str, Any] = (
np.array(A ,dtype=np.intaa )
if self._get_padding_strategies(A ,max_length=A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
UpperCAmelCase__ : Any = self.normalize(
padded_inputs["""input_features"""] ,attention_mask=A )
if return_tensors is not None:
UpperCAmelCase__ : Union[str, Any] = padded_inputs.convert_to_tensors(A )
return padded_inputs
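# Hedged usage sketch (added): this appears to be the (deprecated) MCTCT feature
# extractor; assuming the upstream transformers class, extracting 80-dim MFSC
# features from one second of silence would look like:
def _mctct_feature_extraction_example():
    import numpy as np
    from transformers import MCTCTFeatureExtractor

    extractor = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16_000)
    waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
    inputs = extractor(waveform, sampling_rate=16_000, return_tensors="np")
    return inputs["input_features"]  # shape: (1, num_frames, 80)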
| 65
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCAmelCase_ = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class __lowercase ( _UpperCAmelCase ):
_a = """retribert"""
def __init__( self , UpperCamelCase=3_0522 , UpperCamelCase=768 , UpperCamelCase=8 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=1e-12 , UpperCamelCase=True , UpperCamelCase=128 , UpperCamelCase=0 , **UpperCamelCase , ) -> Tuple:
super().__init__(pad_token_id=lowercase__ , **lowercase__ )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = share_encoders
__a = projection_dim
| 702
|
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
UpperCAmelCase_ = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
class __lowercase ( tr.AbstractTransform ):
def __init__( self , UpperCamelCase = " " ) -> Tuple:
__a = sentence_delimiter
def UpperCamelCase__ ( self , UpperCamelCase ) -> Union[str, Any]:
return list(UpperCamelCase )
def UpperCamelCase__ ( self , UpperCamelCase ) -> int:
__a = []
for sent_idx, sentence in enumerate(UpperCamelCase ):
chars.extend(self.process_string(UpperCamelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCamelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
UpperCAmelCase_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
UpperCAmelCase_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
UpperCAmelCase_ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
UpperCAmelCase_ = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
UpperCAmelCase_ = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
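
# Illustrative invocation (not from the original script): calls the private
# `_compute` directly for a quick local check, mirroring the docstring example;
# requires `jiwer` to be installed.
if __name__ == "__main__":
    predictions = ["this is the prediction", "there is an other sample"]
    references = ["this is the reference", "there is another one"]
    print(CER()._compute(predictions=predictions, references=references))  # ~0.3415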
| 490
| 0
|
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """
    Returns the numerator of the largest fraction strictly smaller than
    numerator/denominator whose denominator does not exceed `limit`
    (Project Euler problem 71).
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
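
# Sanity-check sketch (editor's assumption-free addition): for denominators up
# to 8, the best fraction below 3/7 is 2/5, so the numerator returned is 2. The
# published Project Euler #71 answer for the default arguments is 428570.
def _test_solution() -> None:
    assert solution(3, 7, 8) == 2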
| 240
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 240
| 1
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
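
# Example invocation (editor's sketch; the script filename and the concrete
# argument values are assumptions, the flag names follow the attribute
# accesses above):
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model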
| 106
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
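
# Illustrative sketch (not from the original module): the xpath-related
# defaults printed here come straight from the constructor above.
if __name__ == "__main__":
    config = MarkupLMConfig()
    print(config.max_depth, config.max_xpath_tag_unit_embeddings, config.tag_pad_id)  # 50 256 216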
| 106
| 1
|
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root):
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
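
# Minimal end-to-end sketch (editor's addition) on a weighted triangle graph;
# the weights are arbitrary illustration values. Vertex ids are 0-based, the
# `connect` helper and the returned (child, parent) pairs are 1-based.
def _demo_prim() -> None:
    graph = [Vertex(n) for n in range(3)]
    connect(graph, 1, 2, 15)
    connect(graph, 1, 3, 12)
    connect(graph, 2, 3, 13)
    assert prim(graph, graph[0]) == [(2, 3), (3, 1)]
    assert list(prim_heap(graph, graph[0])) == [(2, 3), (3, 1)]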
| 23
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 646
| 0
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
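
# Illustrative (untested) invocation sketch; the checkpoint id below is an
# assumption — any Stable Diffusion v1-style weights providing the seven
# components registered in `__init__` should fit:
#
#   pipe = SeedResizeStableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#   image = pipe("a photo of an astronaut riding a horse", height=512, width=768).images[0]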
| 288
|
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
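
# Window-free sanity check (editor's addition) on a synthetic image, so the
# class can be exercised without OpenCV's GUI or an image file on disk.
def _demo_resize() -> None:
    img = np.arange(3 * 4 * 3, dtype=np.uint8).reshape(3, 4, 3)
    nn = NearestNeighbour(img, 8, 6)
    nn.process()
    assert nn.output.shape == (6, 8, 3)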
| 288
| 1
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 632
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
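
# Illustrative sketch (not from the original module): the config only records
# which timm model to wrap; the backbone name below is an assumption.
if __name__ == "__main__":
    config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
    print(config.backbone, config.out_indices, config.use_timm_backbone)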
| 632
| 1
|
"""Queue implemented on top of two stacks: amortized O(1) put/get."""
from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Direct references to the bound methods save attribute look-ups
        # inside the while loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
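
# FIFO behaviour check (editor's addition): items come out in insertion order.
def _test_fifo() -> None:
    queue = QueueByTwoStacks([1, 2, 3])
    queue.put(4)
    assert [queue.get() for _ in range(4)] == [1, 2, 3, 4]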
| 712
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
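
# Example invocation (editor's addition; the script filename and checkpoint
# path are assumptions, the flags are the ones registered above):
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base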
| 425
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VideoImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
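
# Editor's sketch: preprocessing a dummy 8-frame video with the defaults
# (resize shortest edge to 224, center-crop 224x224). The class name
# `VideoImageProcessor` is a neutral placeholder; the original model-specific
# name was not recoverable from the source.
def _demo_preprocess() -> None:
    processor = VideoImageProcessor()
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor(video, return_tensors="np")
    assert batch["pixel_values"].shape == (1, 8, 3, 224, 224)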
| 479
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
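
# Illustrative sketch (not from the original module): the layout-specific
# defaults printed here come from the constructor above.
if __name__ == "__main__":
    config = LayoutLMv3Config()
    print(config.max_2d_position_embeddings, config.coordinate_size, config.input_size)  # 1024 128 224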
| 479
| 1
|
"""Quick sort with random pivots, counting the number of comparisons made."""
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
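
# Correctness check (editor's addition): the comparison count varies run to run
# because pivots are random, but the array must always end up sorted.
def _check_sorted() -> None:
    data = [5, 3, 8, 1, 9, 2]
    _in_place_quick_sort(data, 0, len(data) - 1)
    assert data == sorted([5, 3, 8, 1, 9, 2])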
| 344
|
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Fill in any mask that was not supplied with a sensible default."""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
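# Usage sketch for the helper above (the tensor names are placeholders): any mask that is not
# passed in is derived from the pad token, and head masks default to all-ones so no head is
# pruned.
# >>> inputs = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
# >>> sorted(inputs)
# ['attention_mask', 'cross_attn_head_mask', 'decoder_attention_mask', 'decoder_head_mask',
#  'decoder_input_ids', 'head_mask', 'input_ids']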
class __snake_case:
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=False , A_=99 , A_=16 , A_=2 , A_=4 , A_=4 , A_="relu" , A_=0.1 , A_=0.1 , A_=0.0 , A_=0.0 , A_=20 , A_=2 , A_=1 , A_=0 , ) -> Union[str, Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = encoder_layerdrop
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = eos_token_id
lowerCAmelCase = pad_token_id
lowerCAmelCase = bos_token_id
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = self.eos_token_id # Eos Token
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase = self.get_config()
lowerCAmelCase = prepare_mam_aaa_inputs_dict(A_ , A_ , A_ )
return config, inputs_dict
def __snake_case ( self ) -> Optional[Any]:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def __snake_case ( self ) -> str:
lowerCAmelCase, lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def __snake_case ( self , A_ , A_ ) -> Tuple:
lowerCAmelCase = MaMaaaModel(config=A_ ).get_decoder().to(A_ ).eval()
lowerCAmelCase = inputs_dict["""input_ids"""]
lowerCAmelCase = inputs_dict["""attention_mask"""]
lowerCAmelCase = inputs_dict["""head_mask"""]
# first forward pass
lowerCAmelCase = model(A_ , attention_mask=A_ , head_mask=A_ , use_cache=A_ )
lowerCAmelCase, lowerCAmelCase = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention_mask
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCAmelCase = model(A_ , attention_mask=A_ )["""last_hidden_state"""]
lowerCAmelCase = model(A_ , attention_mask=A_ , past_key_values=A_ )[
"""last_hidden_state"""
]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-2 ) )
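        # The assertion above is the standard cache-consistency check: decoding the last 3
        # tokens with past_key_values must reproduce, within atol=1e-2, the same hidden-state
        # slice as a full forward pass over the concatenated sequence. A minimal standalone
        # analogue of the comparison:
        # >>> import torch
        # >>> torch.allclose(torch.tensor([1.0, 2.0]), torch.tensor([1.0, 2.005]), atol=1e-2)
        # True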
def __snake_case ( self , A_ , A_ ) -> Any:
lowerCAmelCase = MaMaaaModel(config=A_ ).to(A_ ).eval()
lowerCAmelCase = model(**A_ )
lowerCAmelCase = outputs.encoder_last_hidden_state
lowerCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase = model.get_encoder()
encoder.save_pretrained(A_ )
lowerCAmelCase = MaMaaaEncoder.from_pretrained(A_ ).to(A_ )
lowerCAmelCase = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase = model.get_decoder()
decoder.save_pretrained(A_ )
lowerCAmelCase = MaMaaaDecoder.from_pretrained(A_ ).to(A_ )
lowerCAmelCase = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=A_ , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : int = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
UpperCAmelCase : Tuple = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase : Any = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
UpperCAmelCase : str = True
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Dict = False
UpperCAmelCase : Dict = False
    def __snake_case ( self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ) -> int:
        if pipeline_test_case_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def __snake_case ( self ) -> int:
lowerCAmelCase = MaMaaaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=A_ )
def __snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(A_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
lowerCAmelCase, lowerCAmelCase = model_class.from_pretrained(A_ , output_loading_info=A_ )
self.assertEqual(info["""missing_keys"""] , [] )
def __snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*A_ )
def __snake_case ( self ) -> int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*A_ )
def __snake_case ( self ) -> Tuple:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCAmelCase = model_class(A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = copy.deepcopy(self._prepare_for_class(A_ , A_ ) )
if not self.is_encoder_decoder:
lowerCAmelCase = inputs["""input_ids"""]
del inputs["input_ids"]
else:
lowerCAmelCase = inputs["""input_ids"""]
lowerCAmelCase = inputs.get("""decoder_input_ids""" , A_ )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , A_ )
lowerCAmelCase = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCAmelCase = wte(A_ )
else:
lowerCAmelCase = wte(A_ )
lowerCAmelCase = wte(A_ )
with torch.no_grad():
model(**A_ )[0]
def __snake_case ( self ) -> str:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = input_dict["""input_ids"""]
lowerCAmelCase = input_ids.ne(1 ).to(A_ )
lowerCAmelCase = MaMaaaForConditionalGeneration(A_ ).eval().to(A_ )
if torch_device == "cuda":
model.half()
model.generate(A_ , attention_mask=A_ )
model.generate(num_beams=4 , do_sample=A_ , early_stopping=A_ , num_return_sequences=3 )
def _long_tensor(tok_lst):
    """Build a long tensor on the active test device."""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
UpperCAmelCase = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __snake_case( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ) -> List[Any]:
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def __snake_case ( self ) -> str:
lowerCAmelCase = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(A_ )
lowerCAmelCase = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
lowerCAmelCase = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
lowerCAmelCase = prepare_mam_aaa_inputs_dict(model.config , A_ , A_ )
with torch.no_grad():
lowerCAmelCase = model(**A_ )[0]
lowerCAmelCase = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , A_ )
# change to expected output here
lowerCAmelCase = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=A_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=A_ ) )
def __snake_case ( self ) -> Dict:
lowerCAmelCase = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(A_ )
# change to intended input
lowerCAmelCase = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
lowerCAmelCase = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
lowerCAmelCase = prepare_mam_aaa_inputs_dict(model.config , A_ , A_ )
with torch.no_grad():
lowerCAmelCase = model(**A_ )[0]
lowerCAmelCase = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , A_ )
# change to expected output here
lowerCAmelCase = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=A_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=A_ ) )
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(A_ )
lowerCAmelCase = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
lowerCAmelCase = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowerCAmelCase = tokenizer(A_ , padding=A_ , return_tensors="""pt""" )
lowerCAmelCase = model.generate(
input_ids=dct["""input_ids"""].to(A_ ) , attention_mask=dct["""attention_mask"""].to(A_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
lowerCAmelCase = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
lowerCAmelCase = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=A_ , skip_special_tokens=A_ )
assert generated == expected_en
| 344
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCamelCase : int = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
__lowerCamelCase : Dict = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
a_ = GPTaTokenizer
def __init__( self : Any , __A : int=None , __A : Dict=None , __A : Tuple=None , __A : Union[str, Any]="<|endoftext|>" , __A : str="<|endoftext|>" , __A : str="<|endoftext|>" , __A : int=False , **__A : Optional[Any] , ):
super().__init__(
__A , __A , tokenizer_file=__A , unk_token=__A , bos_token=__A , eos_token=__A , add_prefix_space=__A , **__A , )
snake_case__ : int = kwargs.pop("add_bos_token" , __A )
snake_case__ : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __A ) != add_prefix_space:
snake_case__ : List[Any] = getattr(__A , pre_tok_state.pop("type" ) )
snake_case__ : List[Any] = add_prefix_space
snake_case__ : Union[str, Any] = pre_tok_class(**__A )
snake_case__ : Dict = add_prefix_space
def _lowercase ( self : Any , *__A : Any , **__A : Union[str, Any] ):
snake_case__ : Dict = kwargs.get("is_split_into_words" , __A )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A , **__A )
def _lowercase ( self : Tuple , *__A : int , **__A : Dict ):
snake_case__ : Tuple = kwargs.get("is_split_into_words" , __A )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A , **__A )
def _lowercase ( self : Tuple , __A : List[Any] , __A : Dict = None ):
snake_case__ : Any = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def _lowercase ( self : str , __A : Dict ):
snake_case__ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__A , add_special_tokens=__A ) + [self.eos_token_id] )
if len(__A ) > self.model_max_length:
snake_case__ : Optional[Any] = input_ids[-self.model_max_length :]
return input_ids
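# Usage note for the add_prefix_space guard enforced above. The class appears under an
# anonymized name here; upstream it is GPT2TokenizerFast, and "gpt2" is one of the checkpoint
# ids in the URL maps above. Pretokenized input is only accepted when the tokenizer was
# created with add_prefix_space=True:
# >>> tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
# >>> enc = tok(["Hello", "world"], is_split_into_words=True)  # passes the assertion above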
| 297
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Return the least row length n for which the fill-count function with minimum
    block length min_block_length first exceeds one million (Project Euler 115)."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
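# Worked sanity check (figure quoted from the Project Euler 115 statement): for a minimum
# block length of 3, the fill-count function first exceeds one million at F(30) = 1,148,904.
# >>> solution(3)
# 30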
| 479
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b] (1-based, inclusive), lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b] (1-based, inclusive)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
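# Note on the interface exercised above: node indices are 1-based and query bounds are
# inclusive, so query(1, 1, size, 4, 6) returns the maximum of elements 4..6 of A, i.e.
# max(7, 3, -5) = 7 for the demo array.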
| 718
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__A : int = logging.getLogger(__name__)
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = "sequence-classification"
def __init__( self : int , A : Union[str, Any] ) -> Optional[Any]:
if type(A ) == dict:
lowercase_ : Any = Namespace(**A )
lowercase_ : Tuple = glue_output_modes[hparams.task]
lowercase_ : Optional[int] = glue_tasks_num_labels[hparams.task]
super().__init__(A , A , self.mode )
def A ( self : Union[str, Any] , **A : List[Any] ) -> str:
return self.model(**A )
def A ( self : Any , A : List[Any] , A : Union[str, Any] ) -> str:
lowercase_ : Dict = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase_ : Union[str, Any] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowercase_ : int = self(**A )
lowercase_ : Optional[int] = outputs[0]
lowercase_ : Union[str, Any] = self.trainer.lr_schedulers[0]['''scheduler''']
lowercase_ : List[str] = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : Optional[int] ) -> Union[str, Any]:
lowercase_ : str = self.hparams
lowercase_ : Any = processors[args.task]()
lowercase_ : Union[str, Any] = processor.get_labels()
for mode in ["train", "dev"]:
lowercase_ : Union[str, Any] = self._feature_file(A )
if os.path.exists(A ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , A )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
lowercase_ : Union[str, Any] = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
lowercase_ : Tuple = convert_examples_to_features(
A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , A )
torch.save(A , A )
def A ( self : Optional[Any] , A : str , A : int , A : bool = False ) -> DataLoader:
lowercase_ : Union[str, Any] = '''dev''' if mode == '''test''' else mode
lowercase_ : List[Any] = self._feature_file(A )
logger.info('''Loading features from cached file %s''' , A )
lowercase_ : Optional[int] = torch.load(A )
lowercase_ : str = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase_ : Union[str, Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowercase_ : List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowercase_ : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowercase_ : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(A , A , A , A ) , batch_size=A , shuffle=A , )
def A ( self : Dict , A : str , A : Union[str, Any] ) -> int:
lowercase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase_ : Any = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowercase_ : List[str] = self(**A )
lowercase_ , lowercase_ : List[str] = outputs[:2]
lowercase_ : Dict = logits.detach().cpu().numpy()
lowercase_ : Optional[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : List[str] , A : Optional[Any] ) -> tuple:
lowercase_ : Tuple = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
lowercase_ : Any = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowercase_ : Union[str, Any] = np.argmax(A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowercase_ : Optional[Any] = np.squeeze(A )
lowercase_ : str = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
lowercase_ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
lowercase_ : str = [[] for _ in range(out_label_ids.shape[0] )]
lowercase_ : List[str] = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , A , A )}
lowercase_ : List[str] = dict(results.items() )
lowercase_ : int = results
return ret, preds_list, out_label_list
def A ( self : str , A : list ) -> dict:
lowercase_ , lowercase_ , lowercase_ : List[Any] = self._eval_end(A )
lowercase_ : Dict = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : str , A : int ) -> dict:
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self._eval_end(A )
lowercase_ : Tuple = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( A : Optional[int] , A : Optional[Any] ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(A , A )
parser.add_argument(
'''--max_seq_length''' , default=1_28 , type=A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=A , required=A , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def lowercase ( ):
lowercase_ : str = argparse.ArgumentParser()
add_generic_args(__snake_case , os.getcwd() )
lowercase_ : List[str] = GLUETransformer.add_model_specific_args(__snake_case , os.getcwd() )
lowercase_ : Dict = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowercase_ : int = os.path.join(
'''./results''' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
lowercase_ : List[Any] = GLUETransformer(__snake_case )
lowercase_ : List[Any] = generic_train(__snake_case , __snake_case )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowercase_ : List[str] = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=__snake_case ) )
lowercase_ : Dict = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__snake_case )
if __name__ == "__main__":
main()
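# Hypothetical invocation sketch. Only --max_seq_length, --task, --gpus and --overwrite_cache
# are defined in this file; the script name, the data path and the remaining flags are assumed
# to come from add_generic_args / BaseTransformer (they are referenced above via args.data_dir,
# args.do_predict and args.output_dir):
# python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#     --max_seq_length 128 --gpus 1 --do_predict --output_dir ./results/mrpc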
| 141
| 0
|
import re
def indian_phone_validator(phone: str) -> bool:
    """Validate an Indian mobile number, optionally prefixed with +91, 91 or 0."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
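# A few illustrative cases for the pattern above:
# >>> indian_phone_validator("+918827897895")
# True
# >>> indian_phone_validator("+91 8827897895")
# True
# >>> indian_phone_validator("112")
# False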
| 593
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor']
_lowerCamelCase = 'SamImageProcessor'
def __init__( self , lowercase_ ):
super().__init__(lowercase_ )
_snake_case : Optional[Any] = self.image_processor
_snake_case : Tuple = -10
_snake_case : str = self.image_processor.size["longest_edge"]
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ):
_snake_case : List[Any] = self.image_processor(
lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # pop arguments that are not used in the forward pass but are used nevertheless
_snake_case : Any = encoding_image_processor["original_sizes"]
if hasattr(lowercase_ , "numpy" ): # Checks if Torch or TF tensor
_snake_case : int = original_sizes.numpy()
_snake_case ,_snake_case ,_snake_case : Union[str, Any] = self._check_and_preprocess_points(
input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
_snake_case : Dict = self._normalize_and_convert(
lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
return encoding_image_processor
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ):
if input_points is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : int = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
]
else:
_snake_case : Dict = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
for point, original_size in zip(lowercase_ , lowercase_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_snake_case ,_snake_case : int = self._pad_points_and_labels(lowercase_ , lowercase_ )
_snake_case : Any = np.array(lowercase_ )
if input_labels is not None:
_snake_case : Optional[Any] = np.array(lowercase_ )
if input_boxes is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : Optional[Any] = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
for box in input_boxes
]
else:
_snake_case : List[str] = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
for box, original_size in zip(lowercase_ , lowercase_ )
]
_snake_case : Tuple = np.array(lowercase_ )
if input_boxes is not None:
if return_tensors == "pt":
_snake_case : List[str] = torch.from_numpy(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[int] = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_snake_case : Tuple = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : Tuple = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_snake_case : Dict = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_snake_case : Optional[Any] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : List[Any] = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : List[Any] = max([point.shape[0] for point in input_points] )
_snake_case : List[str] = []
for i, point in enumerate(lowercase_ ):
if point.shape[0] != expected_nb_points:
_snake_case : Optional[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(lowercase_ )
_snake_case : Optional[Any] = processed_input_points
return input_points, input_labels
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ):
_snake_case ,_snake_case : Optional[int] = original_size
_snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
_snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ )
if is_bounding_box:
_snake_case : str = coords.reshape(-1 , 2 , 2 )
_snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w)
_snake_case : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_snake_case : Optional[Any] = coords.reshape(-1 , 4 )
return coords
def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ):
if input_points is not None:
if hasattr(lowercase_ , "numpy" ): # Checks for TF or Torch tensor
_snake_case : Union[str, Any] = input_points.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
raise ValueError("Input points must be a list of list of floating points." )
_snake_case : Any = [np.array(lowercase_ ) for input_point in input_points]
else:
_snake_case : Optional[int] = None
if input_labels is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : Tuple = input_labels.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
raise ValueError("Input labels must be a list of list integers." )
_snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels]
else:
_snake_case : Optional[Any] = None
if input_boxes is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : List[str] = input_boxes.numpy().tolist()
if (
not isinstance(lowercase_ , lowercase_ )
or not isinstance(input_boxes[0] , lowercase_ )
or not isinstance(input_boxes[0][0] , lowercase_ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
_snake_case : List[Any] = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes]
else:
_snake_case : Optional[int] = None
return input_points, input_labels, input_boxes
@property
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase_ ) )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ )
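# Numeric illustration of the rescaling performed by `_normalize_coordinates` above: a point
# in the original image frame is mapped into the resized longest-edge frame by scaling each
# axis with new_size / old_size.
# >>> old_h, old_w, new_h, new_w = 600, 800, 768, 1024
# >>> x, y = 400, 300
# >>> (x * (new_w / old_w), y * (new_h / old_h))
# (512.0, 384.0)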
| 670
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCAmelCase_ = 25_00_04
UpperCAmelCase_ = 25_00_20
@require_sentencepiece
@require_tokenizers
class __lowercase ( __magic_name__ , unittest.TestCase ):
_a = MBartaaTokenizer
_a = MBartaaTokenizerFast
_a = True
_a = True
def UpperCamelCase__ ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
__a = MBartaaTokenizer(UpperCamelCase , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ) -> Dict:
__a = '<s>'
__a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase__ ( self ) -> List[str]:
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(UpperCamelCase ) , 1054 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
__a = MBartaaTokenizer(UpperCamelCase , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=UpperCamelCase )
__a = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCamelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
__a = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def UpperCamelCase__ ( self ) -> Dict:
# fmt: off
__a = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__a = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__a = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__a = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
__a = tempfile.mkdtemp()
__a = tokenizer_r.save_pretrained(UpperCamelCase )
__a = tokenizer_p.save_pretrained(UpperCamelCase )
                # Checks it saves the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
__a = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(UpperCamelCase , UpperCamelCase )
# Checks everything loads correctly in the same way
__a = tokenizer_r.from_pretrained(UpperCamelCase )
__a = tokenizer_p.from_pretrained(UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase )
# Save tokenizer rust, legacy_format=True
__a = tempfile.mkdtemp()
__a = tokenizer_r.save_pretrained(UpperCamelCase , legacy_format=UpperCamelCase )
__a = tokenizer_p.save_pretrained(UpperCamelCase )
                # Checks it saves the same files
self.assertSequenceEqual(UpperCamelCase , UpperCamelCase )
# Checks everything loads correctly in the same way
__a = tokenizer_r.from_pretrained(UpperCamelCase )
__a = tokenizer_p.from_pretrained(UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) )
shutil.rmtree(UpperCamelCase )
# Save tokenizer rust, legacy_format=False
__a = tempfile.mkdtemp()
__a = tokenizer_r.save_pretrained(UpperCamelCase , legacy_format=UpperCamelCase )
__a = tokenizer_p.save_pretrained(UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a = tokenizer_r.from_pretrained(UpperCamelCase )
__a = tokenizer_p.from_pretrained(UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase , UpperCamelCase ) )
shutil.rmtree(UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
_a = """facebook/mbart-large-50-one-to-many-mmt"""
_a = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
_a = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
_a = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[int]:
__a = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
__a = 1
return cls
def UpperCamelCase__ ( self ) -> int:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def UpperCamelCase__ ( self ) -> int:
__a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase )
def UpperCamelCase__ ( self ) -> int:
self.assertIn(UpperCamelCase , self.tokenizer.all_special_ids )
__a = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
__a = self.tokenizer.decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
__a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase )
def UpperCamelCase__ ( self ) -> int:
__a = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , UpperCamelCase )
__a = 10
__a = self.tokenizer(UpperCamelCase , max_length=UpperCamelCase , truncation=UpperCamelCase ).input_ids[0]
self.assertEqual(ids[0] , UpperCamelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase__ ( self ) -> str:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def UpperCamelCase__ ( self ) -> Optional[int]:
__a = tempfile.mkdtemp()
__a = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase )
__a = MBartaaTokenizer.from_pretrained(UpperCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase )
@require_torch
def UpperCamelCase__ ( self ) -> Optional[Any]:
__a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase , return_tensors='pt' )
__a = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase__ ( self ) -> int:
__a = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
__a = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCamelCase , UpperCamelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__a = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCamelCase__ ( self ) -> Optional[int]:
__a = self.tokenizer(self.src_text , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=3 , return_tensors='pt' )
__a = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=10 , return_tensors='pt' )
__a = targets['input_ids']
__a = shift_tokens_right(UpperCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase__ ( self ) -> Dict:
__a = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(UpperCamelCase ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
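# For reference, the decoder-side assertions above follow the usual "shift right" convention:
# labels of the form [RO_CODE, ..., EOS] become decoder_input_ids of the form
# [EOS, RO_CODE, ...], which is why batch.decoder_input_ids[1][:2] equals [2, RO_CODE].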
| 490
|
'''simple docstring'''
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of num."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Return the digit sum of the numerator of the max_n-th convergent of e."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f"""{solution() = }""")
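# Context: this is Project Euler 65, the digit sum of the numerator of the 100th convergent
# of the continued fraction of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]. The partial denominator
# is 2 * i / 3 when i % 3 == 0 and 1 otherwise, and numerators follow h_i = a_i * h_{i-1} + h_{i-2}.
# Sanity check from the problem statement (10th convergent 1457/536, digit sum 1 + 4 + 5 + 7):
# >>> solution(10)
# 17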
| 490
| 1
|
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Original GELU, computed exactly via the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (the GPT-2 variant)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated linear unit: split on `axis` and gate one half with the sigmoid of the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
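# Usage sketch for the lookup helper above:
# >>> act = get_tf_activation("gelu_fast")
# >>> float(act(tf.constant(1.0)))  # tanh-approximate GELU at 1.0, roughly 0.8412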
| 387
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    """Shuffle data in place by repeated random transpositions and return it."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 457
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-7, feature_layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
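# Sanity check (hypothetical usage): with the default conv_stride above,
#   SEWDConfig().inputs_to_logits_ratio == 5 * 2**6 == 320 input samples per logit frame.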
| 528
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 528
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 5_12,
'xlm-roberta-large': 5_12,
'xlm-roberta-large-finetuned-conll02-dutch': 5_12,
'xlm-roberta-large-finetuned-conll02-spanish': 5_12,
'xlm-roberta-large-finetuned-conll03-english': 5_12,
'xlm-roberta-large-finetuned-conll03-german': 5_12,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-RoBERTa, mimicking fairseq's token-to-id alignment."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
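# Usage sketch (assumes the pretrained SentencePiece model can be downloaded from the Hub):
#   tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   ids = tokenizer("Hello world")["input_ids"]  # wrapped in <s> (id 0) ... </s> (id 2)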
| 692
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs
        )
| 551
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None  # first node of the list
        self.tail = None  # last node; its .next points back to head

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 432
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax distribution over the logits ``x``."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
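# Sanity check: entropy(torch.tensor([[1.0, 1.0]])) is ln(2) ~= 0.6931, the maximum for two classes.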
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """,
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway (early-exit) head: pooler + dropout + classifier on an intermediate layer's output."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 432
| 1
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(self, path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None, split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(self, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
| 370
|
def hexagonal_numbers(length: int) -> list[int]:
    """Return a list of the first ``length`` hexagonal numbers, h(n) = n * (2 * n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
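# The first five hexagonal numbers: hexagonal_numbers(5) -> [0, 1, 6, 15, 28]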
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 370
| 1
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id)

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
| 709
|
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 561
| 0
|
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"
    )
| 166
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 166
| 1
|
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments
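# e.g. simple_interest(principal=1000, daily_interest_rate=0.0005, days_between_payments=30) == 15.0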
def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
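# e.g. compound_interest(10000, 0.05, 3) == 10000 * (1.05**3 - 1) == 1576.25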
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class _UpperCamelCase (a_ ):
snake_case_ = """markuplm"""
def __init__( self , __UpperCamelCase=3_0_5_2_2 , __UpperCamelCase=7_6_8 , __UpperCamelCase=1_2 , __UpperCamelCase=1_2 , __UpperCamelCase=3_0_7_2 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_1_2 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1e-12 , __UpperCamelCase=0 , __UpperCamelCase=0 , __UpperCamelCase=2 , __UpperCamelCase=2_5_6 , __UpperCamelCase=1_0_2_4 , __UpperCamelCase=2_1_6 , __UpperCamelCase=1_0_0_1 , __UpperCamelCase=3_2 , __UpperCamelCase=5_0 , __UpperCamelCase="absolute" , __UpperCamelCase=True , __UpperCamelCase=None , **__UpperCamelCase , )-> Dict:
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = use_cache
__lowerCAmelCase = classifier_dropout
# additional properties
__lowerCAmelCase = max_depth
__lowerCAmelCase = max_xpath_tag_unit_embeddings
__lowerCAmelCase = max_xpath_subs_unit_embeddings
__lowerCAmelCase = tag_pad_id
__lowerCAmelCase = subs_pad_id
__lowerCAmelCase = xpath_unit_hidden_size
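# Minimal usage sketch for the config class above (values are illustrative overrides):
#
#     config = MarkupLMConfig(hidden_size=384, num_hidden_layers=6)
#     config.model_type               # "markuplm"
#     config.xpath_unit_hidden_size   # 32 (default)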
| 290
| 0
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
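# Standalone sketch of the socket monkey-patch trick the tests above rely on: replacing
# socket.socket makes any later connection attempt raise, simulating "no internet".
#
#     import socket
#
#     def offline_socket(*args, **kwargs):
#         raise RuntimeError("network access blocked for this test")
#
#     socket.socket = offline_socket  # must happen before any library opens a connection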
| 121
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 121
| 1
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()

DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
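# Illustrative use of the generated message classes (a sketch; the builder above injects
# `ModelProto` into this module's globals, and the .model path is a placeholder):
#
#     m = ModelProto()
#     with open("spiece.model", "rb") as f:
#         m.ParseFromString(f.read())
#     print(len(m.pieces), m.trainer_spec.model_type)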
| 708
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
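# Illustrative use of the auto classes defined above (the checkpoint name is just an
# example; requires flax to be installed):
#
#     from transformers import FlaxAutoModel
#
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")  # dispatches to FlaxBertModel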
| 123
| 0
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
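# Example invocation of this converter script (all paths are illustrative placeholders):
#
#     python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#         --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --tapas_config_file /path/to/tapas_config.json \
#         --pytorch_dump_path /path/to/output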
| 529
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
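# Sketch of how the lazy mapping resolves a config class to its Flax model class on demand
# (illustrative; assumes FLAX_MODEL_MAPPING supports dict-style lookup by config class):
#
#     from transformers import BertConfig
#
#     flax_cls = FLAX_MODEL_MAPPING[BertConfig]  # -> FlaxBertModel, imported at this point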
| 16
| 0
|
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
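# Sanity checks for check_partition_perfect above: k is "perfect" exactly when
# 4*k + 1 == (2**(j + 1) - 1) ** 2 for some integer j (values below are illustrative).
if __name__ == "__main__":
    assert check_partition_perfect(2)      # 4*2 + 1 == 3**2
    assert check_partition_perfect(12)     # 4*12 + 1 == 7**2
    assert check_partition_perfect(56)     # 4*56 + 1 == 15**2
    assert not check_partition_perfect(3)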
| 455
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_sharded(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
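# Illustrative: loading the same backbone two ways, as the equivalence test above does
# (the checkpoints are the ones named in the test; downloads weights when run):
#
#     from transformers import AutoBackbone
#
#     timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
#     hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18")
#     timm_backbone.channels == hf_backbone.channels  # True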
| 455
| 1
|
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # return the 1-based (row, column) indexes of `letter` in the Polybius square
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        # return the letter at the 1-based (row, column) position in the square
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # write the row indexes in row 0 and the column indexes in row 1
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # flatten row-major and read off consecutive pairs as new coordinates
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # lay the coordinate pairs out linearly, then reshape to (2, n) to undo encode
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
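# Round-trip sanity check (illustrative; note the cipher folds "j" into "i"):
if __name__ == "__main__":
    cipher = BifidCipher()
    assert cipher.decode(cipher.encode("testmessage")) == "testmessage"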
| 258
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=2 , snake_case_=56 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=2 , snake_case_=7 , snake_case_="gelu_new" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , snake_case_="block_sparse" , snake_case_=True , snake_case_=False , snake_case_=2 , snake_case_=3 , ) -> Dict:
SCREAMING_SNAKE_CASE : Tuple =parent
SCREAMING_SNAKE_CASE : int =batch_size
SCREAMING_SNAKE_CASE : List[str] =seq_length
SCREAMING_SNAKE_CASE : Tuple =is_training
SCREAMING_SNAKE_CASE : Dict =use_attention_mask
SCREAMING_SNAKE_CASE : List[Any] =use_token_type_ids
SCREAMING_SNAKE_CASE : str =use_labels
SCREAMING_SNAKE_CASE : Dict =vocab_size
SCREAMING_SNAKE_CASE : str =hidden_size
SCREAMING_SNAKE_CASE : Dict =num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] =num_attention_heads
SCREAMING_SNAKE_CASE : List[str] =intermediate_size
SCREAMING_SNAKE_CASE : List[str] =hidden_act
SCREAMING_SNAKE_CASE : Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str =max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] =type_vocab_size
SCREAMING_SNAKE_CASE : Tuple =type_sequence_label_size
SCREAMING_SNAKE_CASE : str =initializer_range
SCREAMING_SNAKE_CASE : List[Any] =num_choices
SCREAMING_SNAKE_CASE : Union[str, Any] =rescale_embeddings
SCREAMING_SNAKE_CASE : Tuple =attention_type
SCREAMING_SNAKE_CASE : str =use_bias
SCREAMING_SNAKE_CASE : List[str] =block_size
SCREAMING_SNAKE_CASE : Optional[Any] =num_random_blocks
def __a ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Any =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : int =None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : Any =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Tuple =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] =BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __a ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Union[str, Any] =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] =config_and_inputs
SCREAMING_SNAKE_CASE : str ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
def __a ( self ) -> Dict:
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __a ( self ) -> str:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __a ( self ) -> Optional[int]:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __a ( self ) -> Dict:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __a ( self ) -> Any:
super().test_hidden_states_output()
@slow
def __a ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict =model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(snake_case_ )
def __a ( self ) -> str:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __a ( self ) -> Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : Dict =self._prepare_for_class(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE : Optional[int] =model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ , snake_case_=None , **snake_case_ ):
return model(input_ids=snake_case_ , attention_mask=snake_case_ , **snake_case_ )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] =model_jitted(**snake_case_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : Optional[Any] =model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith('''outputs.attentions''' ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
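# The JIT test above compiles the forward pass with `jax.jit` and checks that
# compiled and eager execution agree. A minimal self-contained sketch of that
# pattern on a toy function (illustration only, not part of the original tests):
def _demo_jit_equivalence():
    import jax
    import jax.numpy as jnp
    def forward(x):
        return jnp.tanh(x @ x.T)
    forward_jitted = jax.jit(forward)
    x = jnp.ones((2, 4))
    eager_out = forward(x)
    jitted_out = forward_jitted(x)
    assert eager_out.shape == jitted_out.shape
    assert jnp.allclose(eager_out, jitted_out)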
| 258
| 1
|
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            '''simple docstring'''
            pass
def hashimage(image: Image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image ) -> Dict:
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def A_ ( self : Dict , __a : Optional[Any] , __a : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def A_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
@slow
@require_torch
def A_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
        image_segmenter = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def A_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation' , model=model_id )
        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] , )
| 124
|
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case__ ( TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = 'pt'
        self.framework_tf = 'tf'
    def _setup_pt_ckpt( self , model_path ):
        '''simple docstring'''
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt( self , model_path ):
        '''simple docstring'''
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
def A_ ( self : Any ) -> Any:
'''simple docstring'''
        mock_framework = 'mock_framework'
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
def A_ ( self : int ) -> List[str]:
'''simple docstring'''
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
def A_ ( self : List[str] ) -> Any:
'''simple docstring'''
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 124
| 1
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))  # input shape is inferred from the previous layer
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
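    # The two windowing loops above can be factored into a single helper; this
    # sketch (not in the original script) builds the same (x, y) arrays.
    def make_windows(series, look_back, forward_days):
        xs, ys = [], []
        for i in range(0, len(series) - forward_days - look_back + 1):
            xs.append(series[i : i + look_back])
            ys.append(series[i + look_back : i + look_back + forward_days].ravel())
        return np.array(xs), np.array(ys)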
| 349
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__( self : Dict, _snake_case : Dict, _snake_case : Dict=13, _snake_case : Dict=7, _snake_case : Dict=True, _snake_case : Optional[Any]=False, _snake_case : Optional[int]=99, _snake_case : Union[str, Any]=32, _snake_case : List[Any]=2, _snake_case : List[Any]=4, _snake_case : Any=37, _snake_case : int=0.1, _snake_case : Optional[int]=0.1, _snake_case : int=20, _snake_case : int=2, _snake_case : Any=1, _snake_case : List[Any]=0, ):
'''simple docstring'''
snake_case : Optional[Any] =parent
snake_case : Any =batch_size
snake_case : List[Any] =seq_length
snake_case : int =is_training
snake_case : Dict =use_labels
snake_case : Optional[int] =vocab_size
snake_case : Tuple =hidden_size
snake_case : Optional[Any] =num_hidden_layers
snake_case : Tuple =num_attention_heads
snake_case : Union[str, Any] =intermediate_size
snake_case : Any =hidden_dropout_prob
snake_case : Union[str, Any] =attention_probs_dropout_prob
snake_case : List[Any] =max_position_embeddings
snake_case : List[Any] =eos_token_id
snake_case : int =pad_token_id
snake_case : int =bos_token_id
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
snake_case : Optional[Any] =ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
snake_case : List[str] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
snake_case : List[Any] =tf.concat([input_ids, eos_tensor], axis=1 )
snake_case : Dict =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
snake_case : int =self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
snake_case : List[Any] =prepare_mbart_inputs_dict(_snake_case, _snake_case, _snake_case )
return config, inputs_dict
def __snake_case ( self : Optional[Any], _snake_case : Dict, _snake_case : Union[str, Any] ):
'''simple docstring'''
snake_case : Dict =TFMBartModel(config=_snake_case ).get_decoder()
snake_case : Tuple =inputs_dict['''input_ids''']
snake_case : List[str] =input_ids[:1, :]
snake_case : Optional[Any] =inputs_dict['''attention_mask'''][:1, :]
snake_case : List[str] =inputs_dict['''head_mask''']
snake_case : str =1
# first forward pass
snake_case : Union[str, Any] =model(_snake_case, attention_mask=_snake_case, head_mask=_snake_case, use_cache=_snake_case )
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def _a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , ):
if attention_mask is None:
snake_case : Any =tf.cast(tf.math.not_equal(lowerCamelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case : Optional[int] =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case : Any =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case : Optional[Any] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCAmelCase_ ( a_ , a_ , unittest.TestCase ):
__UpperCAmelCase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__UpperCAmelCase = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__UpperCAmelCase = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __snake_case ( self : Tuple ):
'''simple docstring'''
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self, config_class=MBartConfig )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __snake_case ( self : List[str] ):
'''simple docstring'''
snake_case : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
__UpperCAmelCase = [
' UN Chief Says There Is No Military Solution in Syria',
]
__UpperCAmelCase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
__UpperCAmelCase = 'facebook/mbart-large-en-ro'
@cached_property
def __snake_case ( self : Dict ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __snake_case ( self : str ):
'''simple docstring'''
snake_case : str =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __snake_case ( self : Optional[Any], **_snake_case : Optional[int] ):
'''simple docstring'''
snake_case : Tuple =self.translate_src_text(**_snake_case )
self.assertListEqual(self.expected_text, _snake_case )
def __snake_case ( self : Dict, **_snake_case : str ):
'''simple docstring'''
snake_case : int =self.tokenizer(self.src_text, **_snake_case, return_tensors='''tf''' )
snake_case : Optional[int] =self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2 )
snake_case : List[str] =self.tokenizer.batch_decode(_snake_case, skip_special_tokens=_snake_case )
return generated_words
@slow
def __snake_case ( self : int ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
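# Condensed sketch of the en->ro generation flow exercised by the integration
# test above (downloads the checkpoint; illustration only, not an extra test):
def _demo_translate(texts):
    tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-en-ro' )
    model = TFAutoModelForSeqaSeqLM.from_pretrained('facebook/mbart-large-en-ro' )
    inputs = tokenizer(texts, return_tensors='tf' )
    generated = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2 )
    return tokenizer.batch_decode(generated, skip_special_tokens=True )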
| 349
| 1
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A( datasets.Metric ):
    def _info( self ):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute( self , predictions , references , char_order: int = CHRF.CHAR_ORDER , word_order: int = CHRF.WORD_ORDER , beta: int = CHRF.BETA , lowercase: bool = False , whitespace: bool = False , eps_smoothing: bool = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
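# For reference, the transposition performed in _compute above can be checked
# against sacrebleu directly; a small sketch (not part of the metric itself):
def _demo_chrf_score():
    preds = ['The cat sat on the mat.']
    refs = [['The cat sat on the mat.']]  # one reference list per prediction
    transposed = [[r[i] for r in refs] for i in range(len(refs[0]))]
    return CHRF().corpus_score(preds, transposed).score  # 100.0 for an exact match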
| 103
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 2_5_0_0_0_4
RO_CODE = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class __A( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowercase__ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : str ):
lowerCamelCase_ = """<s>"""
lowerCamelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 1_0_5_4 )
def lowercase__ ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def lowercase__ ( self : str ):
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=True )
lowerCamelCase_ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCamelCase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def lowercase__ ( self : Optional[int] ):
# fmt: off
lowerCamelCase_ = {"""input_ids""": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def lowercase__ ( self : Dict ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__UpperCamelCase )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
shutil.rmtree(__UpperCamelCase )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
shutil.rmtree(__UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A( unittest.TestCase ):
    checkpoint_name = '''facebook/mbart-large-50-one-to-many-mmt'''
    src_text = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    tgt_text = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    expected_src_tokens = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
    def setUpClass( cls ):
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
return cls
def lowercase__ ( self : Union[str, Any] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 2_5_0_0_3_8 )
def lowercase__ ( self : Optional[Any] ):
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def lowercase__ ( self : Tuple ):
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
def lowercase__ ( self : Any ):
lowerCamelCase_ = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __UpperCamelCase )
lowerCamelCase_ = 1_0
lowerCamelCase_ = self.tokenizer(__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase ).input_ids[0]
self.assertEqual(ids[0] , __UpperCamelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
def lowercase__ ( self : Any ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
def lowercase__ ( self : Dict ):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def lowercase__ ( self : Optional[Any] ):
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def lowercase__ ( self : Optional[int] ):
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 1_4) , batch.input_ids.shape )
        self.assertEqual((2, 1_4) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowercase__ ( self : Union[str, Any] ):
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=1_0 , return_tensors="""pt""" )
        labels = targets["""input_ids"""]
        batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowercase__ ( self : Union[str, Any] ):
        inputs = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"""input_ids""": [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
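# The shift_tokens_right helper used above builds decoder inputs from labels by
# shifting everything right and wrapping the final non-pad token around to
# position 0. A toy sketch of that contract (illustration only):
def _demo_shift_tokens_right():
    import torch
    labels = torch.tensor([[2_5_0_0_2_0, 8_8_4, 9_0_1_9, 2, 1, 1]] )  # [ro_RO, ..., EOS, PAD, PAD]
    shifted = shift_tokens_right(labels , 1 )  # pad_token_id = 1
    # -> [[2, 250020, 884, 9019, 2, 1]]: the last non-pad token wraps to the front
    return shifted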
| 103
| 1
|
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float , capacitance: float ):
'''simple docstring'''
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
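    # Worked example (added for illustration): a 10 mH inductor with a 100 nF
    # capacitor resonates near 5.03 kHz, since f = 1 / (2 * pi * sqrt(L * C)).
    print(resonant_frequency(10e-3 , 100e-9 ))  # ('Resonant frequency', 5032.92...)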
| 18
|
import math
def sieve(n: int ) -> list[int]:
    '''Segmented sieve of Eratosthenes: return all primes up to and including n.'''
    in_prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
print(sieve(10**6))
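# Sanity-check sketch (not in the original file): the segmented sieve agrees
# with a direct Eratosthenes sieve for small n.
def _simple_sieve(n: int) -> list[int]:
    flags = [True] * (n + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(math.sqrt(n)) + 1):
        if flags[p]:
            for multiple in range(p * p, n + 1, p):
                flags[multiple] = False
    return [i for i, is_prime in enumerate(flags) if is_prime]
assert sieve(1_000) == _simple_sieve(1_000)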
| 439
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , 'traced_model.pt' ) )
                loaded = torch.jit.load(os.path.join(tmp , 'traced_model.pt' ) , map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ) , inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
        model = DistilBertModel.from_pretrained('distilbert-base-uncased' )
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
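# A standalone sketch of the trace/save/load round trip exercised by the
# torchscript test above, on a toy module (illustration only):
def _demo_torchscript_roundtrip():
    model = torch.nn.Linear(4 , 2 ).eval()
    example = torch.ones(1 , 4 )
    traced = torch.jit.trace(model , example )
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp , 'traced_model.pt' )
        torch.jit.save(traced , path )
        loaded = torch.jit.load(path , map_location='cpu' )
    return loaded(example )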
| 717
|
"""simple docstring"""
def prefix_function(input_string: str ) -> list:
    '''For each position i, the length of the longest proper prefix of
    input_string[: i + 1] that is also a suffix of it (KMP prefix function).'''
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_string: str ) -> int:
    '''Length of the longest border (prefix that is also a suffix).'''
    return max(prefix_function(input_string ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
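# Worked example (added for illustration): each prefix-function value is the
# length of the longest border of the prefix ending at that index.
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4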
| 645
| 0
|
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2")
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.")
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
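# A minimal usage sketch (the module path and the `cpu_weight` tensor below are
# hypothetical; the call simply moves one quantized weight onto GPU 0):
#
#   set_module_quantized_tensor_to_device(
#       model, "transformer.h.0.mlp.dense_4h_to_h.weight", 0, value=cpu_weight
#   )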
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")
    return model
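# A minimal usage sketch (assumes a CUDA build of `bitsandbytes`; the model
# name is only an example):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
#   model = replace_with_bnb_linear(
#       model, quantization_config=BitsAndBytesConfig(load_in_8bit=True)
#   )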
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use"
        " `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
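# For a model whose head is tied to its input embeddings (e.g. a causal LM),
# the sketch below would typically report the head module so that it is kept
# in full precision (the output shown is illustrative, not guaranteed):
#
#   keys = get_keys_to_not_convert(model)   # e.g. ["lm_head"]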
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
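# Worked example of the token-type ids built above (ids are hypothetical):
# with token_ids_0 = [5, 6, 7] and token_ids_1 = [8, 9] the pair is laid out as
# [CLS] A A A [SEP] B B [SEP], so the method returns
# [0, 0, 0, 0, 0, 1, 1, 1] (five 0s for the first segment, three 1s for the second).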
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: try every key and print each candidate plaintext."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")
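# Hand-checked example: brute-forcing "WKLV LV D WHVW" prints, among the 26
# candidates, the correct plaintext at key 3:
#
#   Decryption using Key #3: THIS IS A TEST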
def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcriptions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
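# Hand-checked example of the per-pair accumulation above: for the single pair
# reference "abc" vs prediction "abd" there is 1 substitution, 0 deletions,
# 0 insertions and 2 hits, so the returned CER is 1 / 3 ≈ 0.333.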
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    # formula for the sum of an arithmetic series: S_n = n / 2 * (2a + (n - 1) * d)
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
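# Worked example: sum_of_series(1, 1, 10) = 10 / 2 * (2 * 1 + 9 * 1) = 5 * 11 = 55.0,
# i.e. the sum 1 + 2 + ... + 10.
assert sum_of_series(1, 1, 10) == 55.0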
def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def SCREAMING_SNAKE_CASE__ ( ) -> str:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Union[str, Any]:
requires_backends(lowercase ,["""torch"""] )
_check_torch_version()
snake_case : List[str] = image_tensor.unsqueeze(0 )
snake_case : Tuple = torch.nn.functional.unfold(lowercase ,(patch_height, patch_width) ,stride=(patch_height, patch_width) )
snake_case : Optional[int] = patches.reshape(image_tensor.size(0 ) ,image_tensor.size(1 ) ,lowercase ,lowercase ,-1 )
snake_case : List[str] = patches.permute(0 ,4 ,2 ,3 ,1 ).reshape(
image_tensor.size(2 ) // patch_height ,image_tensor.size(3 ) // patch_width ,image_tensor.size(1 ) * patch_height * patch_width ,)
return patches.unsqueeze(0 )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = 36 ,lowercase = "black" ,lowercase = "white" ,lowercase = 5 ,lowercase = 5 ,lowercase = 5 ,lowercase = 5 ,lowercase = None ,lowercase = None ,) -> Image.Image:
requires_backends(lowercase ,"""vision""" )
# Add new lines so that each line is no more than 80 characters.
snake_case : List[str] = textwrap.TextWrapper(width=80 )
snake_case : List[Any] = wrapper.wrap(text=lowercase )
snake_case : int = """\n""".join(lowercase )
if font_bytes is not None and font_path is None:
snake_case : int = io.BytesIO(lowercase )
elif font_path is not None:
snake_case : str = font_path
else:
snake_case : int = hf_hub_download(lowercase ,"""Arial.TTF""" )
snake_case : Optional[int] = ImageFont.truetype(lowercase ,encoding="""UTF-8""" ,size=lowercase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
snake_case : Tuple = ImageDraw.Draw(Image.new("""RGB""" ,(1, 1) ,lowercase ) )
snake_case , snake_case , snake_case , snake_case : Any = temp_draw.textbbox((0, 0) ,lowercase ,lowercase )
# Create the actual image with a bit of padding around the text.
snake_case : Optional[int] = text_width + left_padding + right_padding
snake_case : List[Any] = text_height + top_padding + bottom_padding
snake_case : List[str] = Image.new("""RGB""" ,(image_width, image_height) ,lowercase )
snake_case : List[str] = ImageDraw.Draw(lowercase )
draw.text(xy=(left_padding, top_padding) ,text=lowercase ,fill=lowercase ,font=lowercase )
return image
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,**lowercase ) -> Any:
requires_backends(lowercase ,"""vision""" )
# Convert to PIL image if necessary
snake_case : Union[str, Any] = to_pil_image(lowercase )
snake_case : str = render_text(lowercase ,**lowercase )
snake_case : Any = max(header_image.width ,image.width )
snake_case : str = int(image.height * (new_width / image.width) )
snake_case : Union[str, Any] = int(header_image.height * (new_width / header_image.width) )
snake_case : List[str] = Image.new("""RGB""" ,(new_width, new_height + new_header_height) ,"""white""" )
new_image.paste(header_image.resize((new_width, new_header_height) ) ,(0, 0) )
new_image.paste(image.resize((new_width, new_height) ) ,(0, new_header_height) )
# Convert back to the original framework if necessary
snake_case : Any = to_numpy_array(lowercase )
if infer_channel_dimension_format(lowercase ) == ChannelDimension.LAST:
snake_case : str = to_channel_dimension_format(lowercase ,ChannelDimension.LAST )
return new_image
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flattened_patches"""]
def __init__( self , A = True , A = True , A = None , A = 2_0_4_8 , A = False , **A , ) -> None:
super().__init__(**A )
snake_case : int = patch_size if patch_size is not None else {"""height""": 1_6, """width""": 1_6}
snake_case : Optional[Any] = do_normalize
snake_case : List[Any] = do_convert_rgb
snake_case : str = max_patches
snake_case : List[Any] = is_vqa
def UpperCAmelCase ( self , A , A , A , **A ) -> np.ndarray:
requires_backends(self.extract_flattened_patches , """torch""" )
_check_torch_version()
# convert to torch
snake_case : Tuple = to_channel_dimension_format(A , ChannelDimension.FIRST )
snake_case : Optional[int] = torch.from_numpy(A )
snake_case , snake_case : int = patch_size["""height"""], patch_size["""width"""]
snake_case , snake_case : Tuple = get_image_size(A )
# maximize scale s.t.
snake_case : str = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
snake_case : Dict = max(min(math.floor(scale * image_height / patch_height ) , A ) , 1 )
snake_case : int = max(min(math.floor(scale * image_width / patch_width ) , A ) , 1 )
snake_case : Dict = max(num_feasible_rows * patch_height , 1 )
snake_case : Union[str, Any] = max(num_feasible_cols * patch_width , 1 )
snake_case : Tuple = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=A , antialias=A , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
snake_case : Optional[Any] = torch_extract_patches(A , A , A )
snake_case : List[Any] = patches.shape
snake_case : List[str] = patches_shape[1]
snake_case : int = patches_shape[2]
snake_case : Tuple = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
snake_case : Tuple = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
snake_case : Dict = torch.arange(A ).reshape([rows, 1] ).repeat(1 , A ).reshape([rows * columns, 1] )
snake_case : Union[str, Any] = torch.arange(A ).reshape([1, columns] ).repeat(A , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
snake_case : Dict = row_ids.to(torch.floataa )
snake_case : Dict = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
snake_case : Any = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
snake_case : Tuple = torch.nn.functional.pad(A , [0, 0, 0, max_patches - (rows * columns)] ).float()
snake_case : Optional[int] = to_numpy_array(A )
return result
def UpperCAmelCase ( self , A , A = None , **A ) -> np.ndarray:
if image.dtype == np.uinta:
snake_case : Any = image.astype(np.floataa )
# take mean across the whole `image`
snake_case : Optional[int] = np.mean(A )
snake_case : Dict = np.std(A )
snake_case : Optional[Any] = max(A , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(A , mean=A , std=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> ImageInput:
snake_case : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case : Union[str, Any] = patch_size if patch_size is not None else self.patch_size
snake_case : Optional[int] = max_patches if max_patches is not None else self.max_patches
snake_case : Any = self.is_vqa
if kwargs.get("""data_format""" , A ) is not None:
raise ValueError("""data_format is not an accepted input as the outputs are """ )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case : Dict = [convert_to_rgb(A ) for image in images]
# All transformations expect numpy arrays.
snake_case : List[Any] = [to_numpy_array(A ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("""A header text must be provided for VQA models.""" )
snake_case : str = kwargs.pop("""font_bytes""" , A )
snake_case : Union[str, Any] = kwargs.pop("""font_path""" , A )
if isinstance(A , A ):
snake_case : List[str] = [header_text] * len(A )
snake_case : int = [
render_header(A , header_text[i] , font_bytes=A , font_path=A )
for i, image in enumerate(A )
]
if do_normalize:
snake_case : int = [self.normalize(image=A ) for image in images]
# convert to torch tensor and permute
snake_case : Dict = [
self.extract_flattened_patches(image=A , max_patches=A , patch_size=A )
for image in images
]
# create attention mask in numpy
snake_case : List[str] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
snake_case : Tuple = BatchFeature(
data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=A )
return encoded_outputs
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file: str, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
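# A minimal usage sketch (downloads the pretrained sentencepiece model; the
# exact ids produced are illustrative):
#
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("Hello, world!")["input_ids"]
#   text = tokenizer.decode(ids)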
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
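# A minimal usage sketch (overriding two of the defaults above; the remaining
# fields keep their default values):
#
#   config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=12)
#   assert config.intermediate_multiple_size == 4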
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
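# Instantiating the shim emits the FutureWarning above and otherwise behaves
# exactly like YolosImageProcessor, e.g. (sketch):
#
#   extractor = YolosFeatureExtractor()  # warns once, then acts as a YolosImageProcessor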
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Turn a test file path into an importable module path."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead.")
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the test module corresponding to `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """All model tester classes defined in the test module."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """All test classes in the module that declare a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """All model classes covered by the test classes in the module."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Instantiate a test class and return the model tester class its `setUp` creates."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """All test classes in the module that cover `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """All model tester classes used by the test classes that cover `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the model tester classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Represent classes (and containers of classes) by their names so the mappings print nicely."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
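# A minimal usage sketch (the test file path is only an example; the printed
# mapping is illustrative):
#
#   mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
#   print(to_json(mapping))  # e.g. {"BertModel": ["BertModelTester"], ...}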
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu for selecting one choice from a list using the keyboard."""

    def __init__(self, prompt=None, choices=[]):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
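# A minimal usage sketch (must run in an interactive terminal; the choices are
# hypothetical, and `handle_input` is supplied by the `@input.register` machinery):
#
#   menu = BulletMenu("Which framework?", ["pytorch", "tensorflow", "jax"])
#   selected_index = menu.run(default_choice=0)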
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class A :
lowercase_ = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
lowercase_ = dataclasses.field(
default=__lowercase ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} ,)
@dataclasses.dataclass
class A :
lowercase_ = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
lowercase_ = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
lowercase_ = dataclasses.field(
default=__lowercase ,metadata={'help': 'A csv or a json file containing the validation data.'} )
lowercase_ = dataclasses.field(
default=__lowercase ,metadata={'help': 'The name of the task to train on.'} ,)
lowercase_ = dataclasses.field(
default=__lowercase ,metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class A :
lowercase_ = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
lowercase_ = dataclasses.field(
default='accuracy' ,metadata={'help': 'The evaluation metric used for the task.'} )
lowercase_ = dataclasses.field(
default='no' ,metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} ,)
lowercase_ = dataclasses.field(
default=10 ,metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} ,)
lowercase_ = dataclasses.field(
default=0.0 ,metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} ,)
lowercase_ = dataclasses.field(
default=__lowercase ,metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} ,)
lowercase_ = dataclasses.field(
default=__lowercase ,metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} ,)
lowercase_ = dataclasses.field(
default=__lowercase ,metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} ,)
lowercase_ = dataclasses.field(
default=0.0 ,metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} ,)
lowercase_ = dataclasses.field(
default=100 ,metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} ,)
lowercase_ = dataclasses.field(
default=__lowercase ,metadata={'help': 'Random seed for initialization.'} ,)
def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : List[str] ):
'''simple docstring'''
_a = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_a = dataset.filter(lambda UpperCamelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_a = int(eval_result * len(UpperCamelCase__ ) )
print(UpperCamelCase__ )
_a = dataset.sort('''probability''' , reverse=UpperCamelCase__ )
_a = dataset.select(range(UpperCamelCase__ ) )
_a = dataset.remove_columns(['''label''', '''probability'''] )
_a = dataset.rename_column('''prediction''' , '''label''' )
_a = dataset.map(lambda UpperCamelCase : {"label": idalabel[example["label"]]} )
_a = dataset.shuffle(seed=args.seed )
_a = os.path.join(UpperCamelCase__ , f'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(UpperCamelCase__ , index=UpperCamelCase__ )
else:
dataset.to_json(UpperCamelCase__ )
def snake_case_ (UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : int , **UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_a = STModelArguments(model_name_or_path=UpperCamelCase__ )
_a = STDataArguments(train_file=UpperCamelCase__ , infer_file=UpperCamelCase__ )
_a = STTrainingArguments(output_dir=UpperCamelCase__ )
_a = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(UpperCamelCase__ ).items():
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for key, value in kwargs.items():
if hasattr(UpperCamelCase__ , UpperCamelCase__ ):
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Sanity checks
_a = {}
_a = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_a = args.train_file
_a = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_a = args.eval_file
for key in data_files:
_a = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
_a = extension
else:
assert extension == args.data_file_extension, f'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), f'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
_a = f'{args.output_dir}/self-train_iter-{{}}'.format
_a = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
accelerator.wait_for_everyone()
_a = None
_a = None
_a = 0
_a = False
# Show the progress bar
_a = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_a = data_dir_format(UpperCamelCase__ )
assert os.path.exists(UpperCamelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_a = os.path.join(UpperCamelCase__ , '''stage-1''' )
_a = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
arguments_dict.update({key: value} )
_a = os.path.join(UpperCamelCase__ , '''best-checkpoint''' , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , UpperCamelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_a = os.path.join(UpperCamelCase__ , '''best-checkpoint''' )
_a = os.path.join(UpperCamelCase__ , '''stage-2''' )
# Update arguments_dict
_a = model_path
_a = data_files['''train''']
_a = current_output_dir
_a = os.path.join(UpperCamelCase__ , '''best-checkpoint''' , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , UpperCamelCase__ )
_a = iteration
_a = data_dir_format(iteration + 1 )
_a = AutoConfig.from_pretrained(os.path.join(UpperCamelCase__ , '''best-checkpoint''' ) )
_a = config.idalabel
_a = os.path.join(UpperCamelCase__ , '''eval_results_best-checkpoint.json''' )
_a = os.path.join(UpperCamelCase__ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(UpperCamelCase__ )
with open(UpperCamelCase__ , '''r''' ) as f:
_a = float(json.load(UpperCamelCase__ )[args.eval_metric] )
_a = os.path.join(UpperCamelCase__ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(UpperCamelCase__ )
# Loading the dataset from local csv or json files.
_a = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
_a = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , f'eval_results_iter-{iteration}.json' ) )
if os.path.exists(UpperCamelCase__ ):
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , f'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.wait_for_everyone()
_a = os.path.join(UpperCamelCase__ , f'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_a = eval_result
if best_iteration is None:
_a = new_iteration
_a = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_a = new_iteration
_a = new_eval_result
_a = 0
else:
if new_eval_result == best_eval_result:
_a = new_iteration
_a = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_a = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , UpperCamelCase__ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , f'eval_results_iter-{iteration}.json' ) , os.path.join(UpperCamelCase__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , f'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(UpperCamelCase__ , '''eval_results_best-iteration.json''' ) , )
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming,
            num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
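# A minimal usage sketch (the generator is hypothetical; in practice this class
# backs `datasets.Dataset.from_generator`):
#
#   def gen():
#       yield {"text": "hello"}
#       yield {"text": "world"}
#
#   ds = GeneratorDatasetInputStream(gen).read()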
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : str=True , UpperCamelCase__ : int=True , UpperCamelCase__ : int=True , UpperCamelCase__ : str=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : int=6_4 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : List[Any]=3_7 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : Union[str, Any]=1_6 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : int=4 , UpperCamelCase__ : List[str]=None , ):
'''simple docstring'''
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_input_mask
snake_case__ = use_token_type_ids
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = embedding_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = num_labels
snake_case__ = num_choices
snake_case__ = scope
def __magic_name__ ( self : Dict):
'''simple docstring'''
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
snake_case__ = None
if self.use_input_mask:
snake_case__ = random_attention_mask([self.batch_size, self.seq_length])
snake_case__ = None
if self.use_token_type_ids:
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
snake_case__ = None
snake_case__ = None
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
snake_case__ = ids_tensor([self.batch_size] , self.num_choices)
snake_case__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Any):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
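
# Minimal illustration of the relative-bound check used above (the toy values
# here are assumptions for demonstration only, not outputs of the model): with
# magnitudes around 1e8 an absolute `atol` would have to be enormous, whereas
# the ratio expected/actual stays inside [1 - TOLERANCE, 1 + TOLERANCE].
def _demo_relative_tolerance():
    expected = torch.tensor([1.0e8, -2.5e7])
    actual = torch.tensor([1.00001e8, -2.50002e7])
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE))  # True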
| 719
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
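
# Minimal usage sketch (the toy shape is an assumption for demonstration): for
# an eager tensor all dimensions are statically known, so the result is a plain
# list of Python ints; inside a traced graph, unknown dims come back as entries
# of `tf.shape` instead.
def _demo_shape_list():
    x = tf.zeros((2, 10, 3))
    return shape_list(x)  # [2, 10, 3]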
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
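
# Minimal usage sketch (toy logits are assumptions): the 1e-9 shift is far below
# float32 precision at these magnitudes, so the result is numerically identical
# to tf.nn.softmax for practical purposes while avoiding a known XLA/CPU issue.
def _demo_stable_softmax():
    logits = tf.constant([[1.0, 2.0, 3.0]])
    return stable_softmax(logits, axis=-1)  # ~[[0.0900, 0.2447, 0.6652]]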
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
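
# Minimal usage sketch (assumed toy shapes, not part of the original module):
# normalizing the last axis of a (2, 4) tensor with a learned scale and offset,
# mirroring torch.nn.functional.layer_norm(x, (4,), weight, bias).
def _demo_functional_layernorm():
    x = tf.random.normal((2, 4))
    weight = tf.ones((4,))
    bias = tf.zeros((4,))
    return functional_layernorm(x, weight, bias, axis=-1)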
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
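
# Minimal usage sketch (the toy shape is an assumption): flattening dims 1..2 of
# a (2, 3, 4, 5) tensor yields shape (2, 12, 5), matching torch.flatten(x, 1, 2).
def _demo_flatten():
    x = tf.zeros((2, 3, 4, 5))
    return flatten(x, start_dim=1, end_dim=2)  # shape (2, 12, 5)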
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
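
# Minimal usage sketch (the toy mask is an assumption): a (batch, seq) 1/0
# keep-mask becomes a broadcastable (batch, 1, 1, seq) additive mask in which
# kept positions are 0 and masked positions are a very large negative number
# (dtype.min), ready to be added to raw attention scores.
def _demo_invert_attention_mask():
    mask = tf.constant([[1.0, 1.0, 0.0]])
    return invert_attention_mask(mask)  # shape (1, 1, 1, 3); last entry ~ float32 min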
def _UpperCAmelCase ( a : tf.Tensor , a : int , a : str = "input_ids" ):
tf.debugging.assert_less(
a , tf.cast(a , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(a )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def save_attributes_to_hdf5_group(group, name, data):
    # HDF5 attribute headers are limited in size, so large attribute lists must be chunked.
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
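
# Minimal round-trip sketch for the two helpers above. This assumes `h5py` is
# installed (it is not imported by this module, so the import lives inside the
# demo); the file name is hypothetical and chosen for illustration only.
def _demo_hdf5_attribute_roundtrip():
    import h5py

    with h5py.File("demo.h5", "w") as f:
        grp = f.create_group("weights")
        save_attributes_to_hdf5_group(grp, "layer_names", [b"dense", b"layer_norm"])
        names = load_attributes_from_hdf5_group(grp, "layer_names")
    return names  # ["dense", "layer_norm"]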
def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 99
| 0
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 149
|
import copy
import os
from typing import List, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
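
# Minimal usage sketch (the values are assumptions for demonstration): build the
# two sub-configs explicitly and compose them into a full AlignConfig via
# `from_text_vision_configs`, then serialize the nested result.
def _demo_align_config_composition():
    text_config = AlignTextConfig(vocab_size=30522, hidden_size=768)
    vision_config = AlignVisionConfig(image_size=600)
    config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
    return config.to_dict()  # nested dict with "text_config" and "vision_config" keys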
| 149
| 1
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 717
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """
    A bunch of argument sanity checks to perform before starting the distillation.
    """
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
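

def _example_distillation_args():
    # Illustrative (assumed) flag combination that satisfies `sanity_checks` for
    # BERT -> DistilBERT distillation with the MLM objective: the student/teacher
    # pair is allowed, alpha_mlm > 0 with alpha_clm == 0, and the loss weights sum
    # to a positive value. Paths to --data_file, --student_config and
    # --token_counts must of course point to real files.
    return [
        "--student_type", "distilbert",
        "--teacher_type", "bert",
        "--mlm",
        "--alpha_ce", "5.0",
        "--alpha_mlm", "2.0",
        "--alpha_clm", "0.0",
        "--alpha_cos", "1.0",
    ]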
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence."
    )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa)."
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax distribution.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag."
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction."
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)."
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only on the [MLM] prediction distribution."
    )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only."
    )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only."
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes over the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true."
    )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches."
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
| 571
| 0
|
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # each stage is a strided convolution: out = floor((in + 2*pad - kernel) / stride) + 1
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCamelCase ( self ) -> List[Any]:
pass
@slow
def _UpperCamelCase ( self ) -> Optional[Any]:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = CvtModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def __lowerCAmelCase ( ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCamelCase ( self ) -> Tuple:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase__ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=lowercase__ , return_tensors='pt' ).to(lowercase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowercase__ )
# verify the logits
SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d in [numerator, digit] for which 1/d contains the
    longest recurring cycle in its decimal fraction part (Project Euler 26).
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
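    # Quick usage sketch (uses the reconstructed name `solution` above):
    # 1/7 = 0.(142857) has the longest recurring cycle among d < 10.
    print(solution(1, 10))  # expected: 7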
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
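# A minimal smoke test of the rename helpers on a toy state dict (the keys are
# shortened for illustration; real keys come from `rename_keys` above). The
# function is illustrative and never called by the conversion flow.
def _rename_helpers_smoke_test():
    toy = OrderedDict({"transformer.encoder.layers.0.linear1.weight": 0, "backbone.0.body.stem.weight": 1})
    rename_key(toy, "transformer.encoder.layers.0.linear1.weight", "encoder.layers.0.fc1.weight")
    toy = rename_backbone_keys(toy)
    assert "encoder.layers.0.fc1.weight" in toy
    assert "backbone.conv_encoder.model.stem.weight" in toy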
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
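# Sketch of the in_proj split performed above (assumes the model's hidden size
# of 256, so the fused projection stacks q, k, v row-wise into a (768, 256)
# matrix). Illustrative only; not called by the conversion flow.
def _qkv_split_sketch():
    in_proj_weight = torch.randn(3 * 256, 256)
    q_w = in_proj_weight[:256, :]
    k_w = in_proj_weight[256:512, :]
    v_w = in_proj_weight[-256:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (256, 256)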
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # re-nest base-model keys under "conditional_detr.model"
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
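# Example invocation (the script filename and output path are illustrative;
# running it needs network access for torch.hub and the Hugging Face Hub):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50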
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
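# Usage sketch (the checkpoint name is the standard public one; the first call
# downloads the tokenizer files). Illustrative only.
def _xlnet_special_tokens_sketch():
    tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    ids = tokenizer.build_inputs_with_special_tokens([10, 11, 12])
    # XLNet appends <sep> and <cls> at the END of the sequence
    assert ids[-2] == tokenizer.sep_token_id and ids[-1] == tokenizer.cls_token_id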
'''simple docstring'''
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
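# Quick sketch of the derived properties (pure config construction, no model
# weights involved). Illustrative only.
def _umt5_config_sketch():
    config = UMT5Config(d_model=256, num_heads=4, num_layers=2)
    assert config.hidden_size == 256
    assert config.num_attention_heads == 4
    assert config.num_hidden_layers == config.num_decoder_layers == 2
    assert config.dense_act_fn == "gelu_new"  # the default "gated-gelu" is remapped above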
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    """Return the sum of the digits of `n`, iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Return the sum of the digits of `n`, recursively."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Return the sum of the digits of `n`, via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing size."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
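    # Minimal correctness sketch for the three variants reconstructed above:
    assert sum_of_digits(-123) == 6
    assert sum_of_digits_recursion(9045) == 18
    assert sum_of_digits_compact(0) == 0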
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
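# Shape sketch for the plain down block (a minimal sketch under assumptions:
# arbitrary channel sizes, NHWC layout as used by the Flax blocks above, and
# an arbitrary time-embedding width since it is projected inside the resnet).
# Illustrative only; may need adjusting to the surrounding diffusers version.
def _down_block_shape_sketch():
    import jax

    block = FlaxDownBlock2D(in_channels=32, out_channels=64)
    sample = jnp.zeros((1, 8, 8, 32))  # (batch, height, width, channels)
    temb = jnp.zeros((1, 128))
    params = block.init(jax.random.PRNGKey(0), sample, temb)
    hidden_states, output_states = block.apply(params, sample, temb)
    # add_downsample=True halves the spatial dims: hidden_states is (1, 4, 4, 64)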
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=False , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=20 , snake_case_=2 , snake_case_=1 , snake_case_=0 , snake_case_=4 , ):
lowercase =parent
lowercase =batch_size
lowercase =seq_length
lowercase =is_training
lowercase =use_labels
lowercase =vocab_size
lowercase =hidden_size
lowercase =num_hidden_layers
lowercase =num_attention_heads
lowercase =intermediate_size
lowercase =hidden_dropout_prob
lowercase =attention_probs_dropout_prob
lowercase =max_position_embeddings
lowercase =eos_token_id
lowercase =pad_token_id
lowercase =bos_token_id
lowercase =attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
lowercase =self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
lowercase =(
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _A( self ):
lowercase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase =tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
lowercase =prepare_led_inputs_dict(snake_case_ , snake_case_ , snake_case_ )
lowercase =tf.concat(
[tf.zeros_like(snake_case_ )[:, :-1], tf.ones_like(snake_case_ )[:, -1:]] , axis=-1 , )
lowercase =global_attention_mask
return config, inputs_dict
def _A( self , snake_case_ , snake_case_ ):
lowercase =TFLEDModel(config=snake_case_ ).get_decoder()
lowercase =inputs_dict['''input_ids''']
lowercase =input_ids[:1, :]
lowercase =inputs_dict['''attention_mask'''][:1, :]
lowercase =1
# first forward pass
lowercase =model(snake_case_ , attention_mask=snake_case_ , use_cache=snake_case_ )
lowercase , lowercase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase =ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowercase =tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase =model(snake_case_ , attention_mask=snake_case_ )[0]
lowercase =model(snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase =output_from_no_past[:, -3:, random_slice_idx]
lowercase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case_ , snake_case_ , rtol=1E-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
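# A minimal sketch of the default-mask rule above (the pad_token_id value is
# hypothetical): positions equal to the pad id get mask 0, everything else 1.
# Illustrative only; not used by the tests.
def _default_mask_sketch():
    pad_token_id = 1
    input_ids = tf.constant([[5, 6, pad_token_id]])
    attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
    return attention_mask  # -> [[1, 1, 0]]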
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =tf.zeros_like(inputs_dict['''attention_mask'''] )
lowercase =2
lowercase =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
lowercase =True
lowercase =self.model_tester.seq_length
lowercase =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(snake_case_ ):
lowercase =outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(snake_case_ ):
lowercase =[t.numpy() for t in outputs.encoder_attentions]
lowercase =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
lowercase =True
lowercase =False
lowercase =False
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
lowercase =len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowercase =True
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
lowercase =True
lowercase =True
lowercase =model_class(snake_case_ )
lowercase =model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def _A( self ):
pass
def _A( self ):
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class __magic_name__ ( unittest.TestCase ):
def _A( self ):
lowercase =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
lowercase =_long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
lowercase =_long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
lowercase =prepare_led_inputs_dict(model.config , snake_case_ , snake_case_ )
lowercase =model(**snake_case_ )[0]
lowercase =(1, 10_24, 7_68)
self.assertEqual(output.shape , snake_case_ )
# change to expected output here
lowercase =tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-3 )
def _A( self ):
lowercase =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
lowercase =_long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
lowercase =_long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
lowercase =prepare_led_inputs_dict(model.config , snake_case_ , snake_case_ )
lowercase =model(**snake_case_ )[0]
lowercase =(1, 10_24, model.config.vocab_size)
self.assertEqual(output.shape , snake_case_ )
# change to expected output here
lowercase =tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-3 , rtol=1E-3 )
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
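# Usage sketch (assumes `note_seq` is NOT installed): instantiating the
# placeholder surfaces a helpful ImportError via `requires_backends` instead
# of failing later with an opaque AttributeError. Illustrative only.
if __name__ == "__main__":
    try:
        MidiProcessor()
    except ImportError as err:
        print(err)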
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Test or_gate against the full truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__ :
"""simple docstring"""
def __init__(self , __lowercase , __lowercase=13 , __lowercase=32 , __lowercase=2 , __lowercase=3 , __lowercase=16 , __lowercase=[1, 2, 1] , __lowercase=[2, 2, 4] , __lowercase=2 , __lowercase=2.0 , __lowercase=True , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase="gelu" , __lowercase=False , __lowercase=True , __lowercase=0.0_2 , __lowercase=1e-5 , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=10 , __lowercase=8 , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embed_dim
__lowerCAmelCase = depths
__lowerCAmelCase = num_heads
__lowerCAmelCase = window_size
__lowerCAmelCase = mlp_ratio
__lowerCAmelCase = qkv_bias
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = hidden_act
__lowerCAmelCase = use_absolute_embeddings
__lowerCAmelCase = patch_norm
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = is_training
__lowerCAmelCase = scope
__lowerCAmelCase = use_labels
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = encoder_stride
def lowercase (self ):
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase (self ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase (self , __lowercase , __lowercase , __lowercase ):
__lowerCAmelCase = SwinvaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__lowerCAmelCase = model(__lowercase )
__lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase (self , __lowercase , __lowercase , __lowercase ):
__lowerCAmelCase = SwinvaForMaskedImageModeling(config=__lowercase )
model.to(__lowercase )
model.eval()
__lowerCAmelCase = model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = SwinvaForMaskedImageModeling(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase (self , __lowercase , __lowercase , __lowercase ):
__lowerCAmelCase = self.type_sequence_label_size
__lowerCAmelCase = SwinvaForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__lowerCAmelCase = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase (self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : List[Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__UpperCamelCase : Union[str, Any] = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Any = False
def lowercase (self ):
__lowerCAmelCase = SwinvaModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__lowercase , embed_dim=37 )
def lowercase (self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase (self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def lowercase (self ):
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def lowercase (self ):
pass
def lowercase (self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def lowercase (self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__lowercase )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase )
def lowercase (self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__lowerCAmelCase = outputs.attentions
__lowerCAmelCase = len(self.model_tester.depths )
self.assertEqual(len(__lowercase ) , __lowercase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = config.window_size**2
__lowerCAmelCase = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(__lowercase ) , __lowercase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__lowerCAmelCase = len(__lowercase )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__lowercase , __lowercase ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
__lowerCAmelCase = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__lowerCAmelCase = 2
self.assertEqual(out_len + added_hidden_states , len(__lowercase ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(__lowercase ) , __lowercase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def lowercase (self , __lowercase , __lowercase , __lowercase , __lowercase ):
__lowerCAmelCase = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__lowercase ) , __lowercase )
# Swinv2 has a different seq_length
__lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__lowerCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(__lowercase ) , __lowercase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = reshaped_hidden_states[0].shape
__lowerCAmelCase = (
reshaped_hidden_states[0].view(__lowercase , __lowercase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase (self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCAmelCase = True
self.check_hidden_states_output(__lowercase , __lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
self.check_hidden_states_output(__lowercase , __lowercase , __lowercase , __lowercase )
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
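# A small arithmetic sketch (illustration only, not part of the test suite) of the shapes
# asserted above, assuming the swinv2-tiny-patch4-window8-256 configuration: 256x256 input,
# 4x4 patches and 8x8 local attention windows (these values are assumptions for the example).
image_size, patch_size, window_size = 256, 4, 8
patches_per_side = image_size // patch_size  # 64 patches along each side at the first stage
num_patches = patches_per_side**2            # 4096 tokens before any patch merging
window_size_squared = window_size**2         # 64: tokens that attend to one another within a
                                             # window, so each attention map is [num_heads, 64, 64]
print(num_patches, window_size_squared)      # 4096 64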
| 719
|
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """
    Find the median of the combined elements of two (not necessarily sorted) arrays.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([0, -1.1], [2.5, 1])
    0.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
| 474
| 0
|
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class A__ ( __lowercase):
"""simple docstring"""
def __init__( self: Optional[int] , *__a: Union[str, Any] , **__a: Dict )-> List[Any]:
super().__init__(*__a , **__a )
def a__ ( self: List[str] , __a: Optional[int] , __a: int )-> str:
lowerCamelCase : Union[str, Any] = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__a )
lowerCamelCase : str = self.values[key]
def a__ ( self: Union[str, Any] )-> List[str]:
return (
sum(self.charge_factor - len(__a ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def a__ ( self: str , __a: Optional[Any] , __a: List[Any]=None )-> Optional[int]:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__a ) == 0
):
return key
return super()._collision_resolution(__a , __a )
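# A standalone sketch of the separate-chaining idea implemented above, using a plain dict
# of deques so it runs without the HashTable base class (the modulo hash below is a toy
# assumption for illustration):
from collections import deque

buckets: dict[int, deque] = {}
for value in [10, 25, 17, 32]:
    key = value % 7  # toy hash function
    buckets.setdefault(key, deque()).appendleft(value)
print(buckets[3])    # deque([17, 10]): 10 and 17 collide and share one chain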
| 222
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def snake_case ( ) -> Generator[int, None, None]:
lowerCamelCase : dict[int, int] = {}
lowerCamelCase : str = 2
while True:
lowerCamelCase : int = factor_map.pop(UpperCamelCase__ , UpperCamelCase__ )
if factor:
lowerCamelCase : List[Any] = factor + prime
while x in factor_map:
x += factor
lowerCamelCase : int = factor
else:
lowerCamelCase : Optional[int] = prime
yield prime
prime += 1
def snake_case ( UpperCamelCase__ : float = 1E10 ) -> int:
lowerCamelCase : Optional[int] = sieve()
lowerCamelCase : List[str] = 1
while True:
lowerCamelCase : Tuple = next(UpperCamelCase__ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(UpperCamelCase__ )
n += 2
if __name__ == "__main__":
print(solution())
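# Quick sanity check for the incremental sieve above: it yields primes in order with no
# fixed upper bound, so we can slice off as many as we need.
from itertools import islice

print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]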
| 222
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase ( _lowercase ):
"""simple docstring"""
snake_case = ['''image_processor''', '''tokenizer''']
snake_case = '''BlipImageProcessor'''
snake_case = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : str ,_SCREAMING_SNAKE_CASE : str ,_SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
'''simple docstring'''
A = False
super().__init__(A_ ,A_ )
A = self.image_processor
def __call__( self : Dict ,_SCREAMING_SNAKE_CASE : ImageInput = None ,_SCREAMING_SNAKE_CASE : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_SCREAMING_SNAKE_CASE : bool = True ,_SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False ,_SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = None ,_SCREAMING_SNAKE_CASE : Optional[int] = None ,_SCREAMING_SNAKE_CASE : int = 0 ,_SCREAMING_SNAKE_CASE : Optional[int] = None ,_SCREAMING_SNAKE_CASE : Optional[bool] = None ,_SCREAMING_SNAKE_CASE : bool = False ,_SCREAMING_SNAKE_CASE : bool = False ,_SCREAMING_SNAKE_CASE : bool = False ,_SCREAMING_SNAKE_CASE : bool = False ,_SCREAMING_SNAKE_CASE : bool = False ,_SCREAMING_SNAKE_CASE : bool = True ,_SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None ,**_SCREAMING_SNAKE_CASE : Tuple ,) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
A = self.tokenizer
A = self.tokenizer(
text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,)
return text_encoding
# add pixel_values
A = self.image_processor(A_ ,return_tensors=A_ )
if text is not None:
A = self.tokenizer(
text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,)
else:
A = None
if text_encoding is not None:
encoding_image_processor.update(A_ )
return encoding_image_processor
def A( self : int ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*A_ ,**A_ )
def A( self : List[str] ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : Any ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*A_ ,**A_ )
@property
def A( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
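# Quick illustration of the dict.fromkeys() trick used in model_input_names above: it
# merges the two name lists while removing duplicates and preserving first-seen order
# (the sample lists here are illustrative, not the processor's actual output).
tokenizer_names = ["input_ids", "attention_mask"]
image_names = ["pixel_values", "attention_mask"]
print(list(dict.fromkeys(tokenizer_names + image_names)))
# ['input_ids', 'attention_mask', 'pixel_values']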
| 700
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
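# Example invocation of the conversion script above. The script file name and all paths
# below are placeholders for illustration, not real artifacts:
#
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path /path/to/yoso_pretrained.bin \
#       --config_file /path/to/yoso_config.json \
#       --pytorch_dump_path /path/to/converted_model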
| 110
| 0
|
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the products of the parts of every prime partition of number_to_partition."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
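# Worked example for partition() above: the prime partitions of 7 are 7, 5 + 2 and
# 3 + 2 + 2. Each multiset of primes has a unique product, so partition(7) fingerprints
# the three partitions as three distinct products:
assert partition(7) == {7, 10, 12}
assert len(partition(7)) == 3  # 3 prime partitions of 7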
| 12
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCAmelCase_ ( self ):
__A , __A : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=_A , dtype=jnp.bfloataa )
__A , __A : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=_A , from_pt=_A , dtype=jnp.bfloataa )
__A : Optional[Any] = controlnet_params
__A : Optional[int] = 'bird'
__A : List[str] = jax.device_count()
__A : Any = pipe.prepare_text_inputs([prompts] * num_samples )
__A : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
__A : List[str] = pipe.prepare_image_inputs([canny_image] * num_samples )
__A : List[str] = jax.random.PRNGKey(0 )
__A : List[str] = jax.random.split(_A , jax.device_count() )
__A : int = replicate(_A )
__A : Optional[Any] = shard(_A )
__A : List[str] = shard(_A )
__A : str = pipe(
prompt_ids=_A , image=_A , params=_A , prng_seed=_A , num_inference_steps=50 , jit=_A , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__A : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__A : Dict = images[0, 253:256, 253:256, -1]
__A : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__A : List[str] = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
__A , __A : List[Any] = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=_A , dtype=jnp.bfloataa )
__A , __A : Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=_A , from_pt=_A , dtype=jnp.bfloataa )
__A : Optional[int] = controlnet_params
__A : Tuple = 'Chef in the kitchen'
__A : Optional[int] = jax.device_count()
__A : Optional[int] = pipe.prepare_text_inputs([prompts] * num_samples )
__A : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
__A : Dict = pipe.prepare_image_inputs([pose_image] * num_samples )
__A : int = jax.random.PRNGKey(0 )
__A : Dict = jax.random.split(_A , jax.device_count() )
__A : str = replicate(_A )
__A : Union[str, Any] = shard(_A )
__A : List[Any] = shard(_A )
__A : Any = pipe(
prompt_ids=_A , image=_A , params=_A , prng_seed=_A , num_inference_steps=50 , jit=_A , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__A : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__A : List[Any] = images[0, 253:256, 253:256, -1]
__A : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__A : Optional[Any] = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
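# A small sketch of the replicate/shard pattern the pipelines above rely on (assumes jax
# and flax are installed; shapes are illustrative): shard() splits the leading batch axis
# across local devices, replicate() copies a pytree to every device for pmap execution.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n_devices = jax.device_count()
batch = jnp.zeros((n_devices * 2, 4))        # global batch: 2 examples per device
sharded = shard(batch)                       # -> shape (n_devices, 2, 4)
params = {"w": jnp.ones((4, 4))}
replicated = replicate(params)               # each leaf gains a leading device axis
print(sharded.shape, replicated["w"].shape)  # (n_devices, 2, 4) and (n_devices, 4, 4)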
| 239
| 0
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=99 , __UpperCAmelCase=0 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase="last" , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Any:
A : int = parent
A : Union[str, Any] = batch_size
A : Union[str, Any] = seq_length
A : Any = is_training
A : List[Any] = use_input_lengths
A : Optional[int] = use_token_type_ids
A : Optional[int] = use_labels
A : str = gelu_activation
A : int = sinusoidal_embeddings
A : Tuple = causal
A : Any = asm
A : str = n_langs
A : str = vocab_size
A : List[str] = n_special
A : Optional[int] = hidden_size
A : Any = num_hidden_layers
A : int = num_attention_heads
A : Optional[Any] = hidden_dropout_prob
A : Any = attention_probs_dropout_prob
A : Dict = max_position_embeddings
A : Optional[int] = type_vocab_size
A : int = type_sequence_label_size
A : List[str] = initializer_range
A : Optional[Any] = num_labels
A : Optional[Any] = num_choices
A : List[Any] = summary_type
A : int = use_proj
A : Optional[int] = scope
def snake_case ( self ) -> Union[str, Any]:
A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : str = random_attention_mask([self.batch_size, self.seq_length] )
A : List[Any] = None
if self.use_input_lengths:
A : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A : Tuple = None
if self.use_token_type_ids:
A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A : Dict = None
A : Union[str, Any] = None
A : Tuple = None
if self.use_labels:
A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A : str = ids_tensor([self.batch_size] , 2 ).float()
A : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
A : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case ( self ) -> Optional[Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]:
A : Union[str, Any] = FlaubertModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A : Optional[Any] = model(__UpperCAmelCase , lengths=__UpperCAmelCase , langs=__UpperCAmelCase )
A : Dict = model(__UpperCAmelCase , langs=__UpperCAmelCase )
A : List[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> int:
A : List[Any] = FlaubertWithLMHeadModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A : Optional[int] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
A : Dict = FlaubertForQuestionAnsweringSimple(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A : Tuple = model(__UpperCAmelCase )
A : Optional[int] = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any:
A : Tuple = FlaubertForQuestionAnswering(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A : Optional[Any] = model(__UpperCAmelCase )
A : Optional[Any] = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , p_mask=__UpperCAmelCase , )
A : int = model(
__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , cls_index=__UpperCAmelCase , is_impossible=__UpperCAmelCase , )
(A ) : Union[str, Any] = result_with_labels.to_tuple()
A : Any = model(__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase )
(A ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> int:
A : Dict = FlaubertForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A : Dict = model(__UpperCAmelCase )
A : Any = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> int:
A : List[Any] = self.num_labels
A : str = FlaubertForTokenClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict:
A : List[str] = self.num_choices
A : Optional[int] = FlaubertForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A : Optional[int] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case ( self ) -> List[Any]:
A : List[str] = self.prepare_config_and_inputs()
(
A
) : Dict = config_and_inputs
A : Any = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Dict = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ : Optional[int] = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Optional[int]:
A : Tuple = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
A : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
A : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def snake_case ( self ) -> Optional[Any]:
A : List[Any] = FlaubertModelTester(self )
A : Optional[int] = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 )
def snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def snake_case ( self ) -> str:
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase )
def snake_case ( self ) -> Dict:
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase )
def snake_case ( self ) -> Tuple:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__UpperCAmelCase )
def snake_case ( self ) -> int:
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase )
def snake_case ( self ) -> List[str]:
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase )
def snake_case ( self ) -> Optional[int]:
A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__UpperCAmelCase )
def snake_case ( self ) -> Optional[int]:
A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__UpperCAmelCase )
@slow
def snake_case ( self ) -> List[Any]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : List[Any] = FlaubertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@slow
@require_torch_gpu
def snake_case ( self ) -> List[str]:
A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
A : int = True
A : Any = model_class(config=__UpperCAmelCase )
A : Any = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
A : str = torch.jit.trace(
__UpperCAmelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__UpperCAmelCase , os.path.join(__UpperCAmelCase , '''traced_model.pt''' ) )
A : Any = torch.jit.load(os.path.join(__UpperCAmelCase , '''traced_model.pt''' ) , map_location=__UpperCAmelCase )
loaded(inputs_dict['''input_ids'''].to(__UpperCAmelCase ) , inputs_dict['''attention_mask'''].to(__UpperCAmelCase ) )
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ) -> Optional[int]:
A : Dict = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
A : List[str] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
with torch.no_grad():
A : List[str] = model(__UpperCAmelCase )[0]
A : str = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , __UpperCAmelCase )
A : List[Any] = torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
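# A minimal sketch of the trace/save/load round trip performed by the torchscript test
# above, on a toy module rather than FlauBERT (all names here are illustrative):
import os
import tempfile

import torch


class Toy(torch.nn.Module):
    def forward(self, x):
        return x * 2


traced = torch.jit.trace(Toy(), (torch.ones(2, 3),))
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")
print(loaded(torch.ones(2, 3)))  # tensor of 2s, matching the eager module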
| 709
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(payload)}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f'*Num failures* :{len(job_result["failed"])} \n'
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
    return {}
def retrieve_artifact(name: str):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
lowercase : Dict = get_job_links()
lowercase : Union[str, Any] = retrieve_available_artifacts()
lowercase : Any = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowercase : int = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowercase : Union[str, Any] = github_actions_job_links.get("run_doctests")
lowercase : List[str] = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
lowercase : Optional[Any] = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
lowercase , lowercase , lowercase : Union[str, Any] = handle_test_results(artifact["stats"])
lowercase : Optional[int] = failed
lowercase : str = success
lowercase : int = time_spent[1:-1] + ", "
lowercase : Union[str, Any] = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
lowercase : Union[str, Any] = line.replace("FAILED ", "")
lowercase : Any = line.split()[0].replace("\n", "")
if "::" in line:
lowercase , lowercase : Tuple = line.split("::")
else:
lowercase , lowercase : int = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowercase : Tuple = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowercase : List[Any] = all_failures[test] if test in all_failures else "N/A"
lowercase : List[str] = failure
break
lowercase : Optional[int] = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
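# A quick demonstration (the summary string below is made up) of the counting logic in
# handle_test_results above: in a pytest summary, the token *before* "failed"/"passed"
# carries the count, and the trailing token carries the elapsed time.
print(handle_test_results("= 2 failed, 10 passed in 30.5s ="))  # (2, 10, '30.5s')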
| 423
| 0
|
"""simple docstring"""
from __future__ import annotations
def __a ( A , A ) -> tuple[int, int]:
'''simple docstring'''
if b == 0:
return (1, 0)
((A__) , (A__)) = extended_euclid(A , a % b )
A__ = a // b
return (y, x - k * y)
def __a ( A , A , A , A ) -> int:
'''simple docstring'''
((A__) , (A__)) = extended_euclid(A , A )
A__ = na * na
A__ = ra * x * na + ra * y * na
return (n % m + m) % m
def __a ( A , A ) -> int:
'''simple docstring'''
((A__) , (A__)) = extended_euclid(A , A )
if b < 0:
A__ = (b % n + n) % n
return b
def __a ( A , A , A , A ) -> int:
'''simple docstring'''
A__ , A__ = invert_modulo(A , A ), invert_modulo(A , A )
A__ = na * na
A__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 337
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
__UpperCAmelCase ="""us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 16,
"""per_device_eval_batch_size""": 16,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 5_00,
"""save_steps""": 55_00,
}
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions(self):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self):
'''simple docstring'''
return f"""{self.framework}-transfromers-test"""
@property
    def test_path(self):
'''simple docstring'''
return f"""./tests/sagemaker/scripts/{self.framework}"""
@property
    def image_uri(self):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
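# A quick check (the sample log line is made up) of the metric regexes above: each one
# captures the numeric value that follows the metric name in a training log line.
import re

match = re.search(r"train_runtime.*=\D*(.*?)$", "train_runtime = 123.4")
print(match.group(1))  # 123.4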
| 337
| 1
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
def __len__( self ):
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i):
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
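# A sketch of the lock-then-cache pattern used in __init__ above, with hypothetical names
# (load_or_build, build_features): the FileLock ensures only one distributed worker builds
# the cache while the others wait and then load it.
import os

import torch
from filelock import FileLock


def load_or_build(cache_path, build_features, overwrite=False):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite:
            return torch.load(cache_path)  # another worker already built the cache
        features = build_features()        # the first worker pays the construction cost
        torch.save(features, cache_path)
        return features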
| 609
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase : Dict = SamImageProcessor()
UpperCAmelCase : Tuple = SamProcessor(snake_case )
processor.save_pretrained(self.tmpdirname )
def A_ ( self , **snake_case ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).image_processor
def A_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
UpperCAmelCase : Union[str, Any] = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase : Tuple = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
UpperCAmelCase : str = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=snake_case , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = self.get_image_processor()
UpperCAmelCase : Tuple = SamProcessor(image_processor=snake_case )
UpperCAmelCase : str = self.prepare_image_inputs()
UpperCAmelCase : Dict = image_processor(snake_case , return_tensors="np" )
UpperCAmelCase : str = processor(images=snake_case , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = self.get_image_processor()
UpperCAmelCase : Dict = SamProcessor(image_processor=snake_case )
UpperCAmelCase : Tuple = [torch.ones((1, 3, 5, 5) )]
UpperCAmelCase : int = [[1_7_6_4, 2_6_4_6]]
UpperCAmelCase : Optional[int] = [[6_8_3, 1_0_2_4]]
UpperCAmelCase : str = processor.post_process_masks(snake_case , snake_case , snake_case )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
UpperCAmelCase : Union[str, Any] = processor.post_process_masks(
snake_case , torch.tensor(snake_case ) , torch.tensor(snake_case ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
UpperCAmelCase : Optional[int] = [np.ones((1, 3, 5, 5) )]
UpperCAmelCase : Union[str, Any] = processor.post_process_masks(snake_case , np.array(snake_case ) , np.array(snake_case ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
UpperCAmelCase : List[str] = [[1, 0], [0, 1]]
with self.assertRaises(snake_case ):
UpperCAmelCase : str = processor.post_process_masks(snake_case , np.array(snake_case ) , np.array(snake_case ) )
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also fail with incorrect input
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 609
| 1
|
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
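

# A minimal usage sketch (illustrative only): like any PretrainedConfig, the
# config round-trips through ``save_pretrained``/``from_pretrained``.
#
#   config = NezhaConfig(hidden_dropout_prob=0.2)
#   config.save_pretrained("./nezha-custom")        # path is an example
#   config = NezhaConfig.from_pretrained("./nezha-custom")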
| 68
|
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """Return the minimal cost of a path from the top-left to the bottom-right
    cell, moving only right or down (the matrix is updated in place)."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
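

# A quick sanity check (illustrative, not part of the original file): the
# cheapest right/down path through [[1, 2], [3, 4]] is 1 -> 2 -> 4, costing 7.
assert min_path_sum([[1, 2], [3, 4]]) == 7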
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68
| 1
|
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below ``limit`` whose Collatz chain is the
    longest, memoizing chain lengths as they are discovered."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input_a in range(2, limit):
        counter = 0
        number = input_a
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input_a not in counters:
            counters[input_a] = counter
        if counter > pre_counter:
            largest_number = input_a
            pre_counter = counter
    return largest_number
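

# A quick sanity check (illustrative, not part of the original file): among
# starting numbers below 10, 9 has the longest chain (9 -> 28 -> ... -> 1).
assert solution(10) == 9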
if __name__ == "__main__":
print(solution(int(input().strip())))
| 144
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
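

# Illustrative note (an assumption about the expected input, not stated in this
# file): each line of a ref file is a JSON list of token indices such as
# ``[2, 3, 6]``, marking sub-word tokens that continue a whole Chinese word so
# that the whole-word-mask collator can mask the word as one unit.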
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 144
| 1
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way ``target`` can be built by concatenating words from
    ``word_bank`` (words may be reused)."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
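
    # Expected behaviour, worked out by hand (illustrative, not program output):
    # the calls above print 2, 2 and 4 decompositions respectively; for example
    # 'jwajalapa' splits as [jwa, j, a, lapa] and [j, w, a, j, a, lapa].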
| 306
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens(self):
        pass
| 306
| 1
|
"""simple docstring"""
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return a peak of ``lst`` (an element strictly greater than its
    neighbours) using a divide-and-conquer search."""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
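

# A quick sanity check (illustrative, not part of the original file): 5 is the
# unique peak of this bitonic list.
assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5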
if __name__ == "__main__":
import doctest
doctest.testmod()
| 317
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 317
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
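
# Illustrative note (not part of the original module): with the lazy structure
# above, ``from transformers.models.encoder_decoder import EncoderDecoderModel``
# only triggers the heavy modeling import at the moment of first access.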
| 566
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in ``qs`` match a contiguous window of the
    key tuple ``ks``."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}

    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."

    return freeze(unflatten_dict(result))
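

# A minimal usage sketch (illustrative only): every leaf of the parameter tree
# must match one of the rules above, otherwise the assertion in
# ``set_partitions`` fires.
#
#   import jax.numpy as jnp
#   toy_params = {"transformer": {"wte": {"embedding": jnp.zeros((8, 4))}}}
#   set_partitions(toy_params)  # frozen {"transformer": {"wte": {"embedding": P("mp", None)}}}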
| 185
| 0
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """Configuration for downloads: cache location, retry policy and extraction behaviour."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    download_desc: Optional[str] = None
    storage_options: Optional[Dict] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
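

# A minimal usage sketch (illustrative only): ``copy`` deep-copies every field,
# so mutating the copy leaves the original untouched.
#
#   config = DownloadConfig(max_retries=3)
#   other = config.copy()
#   other.max_retries = 0
#   assert config.max_retries == 3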
| 700
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # use the CLIPSeg model to produce a soft mask for the region described by ``text``
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
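

# A minimal usage sketch (illustrative; the CLIPSeg checkpoint id and the
# ``custom_pipeline`` name are assumptions, not taken from this file):
#
#   from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#   from diffusers import DiffusionPipeline
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=model,
#       segmentation_processor=processor,
#   )
#   result = pipe(image=init_image, text="a glass", prompt="a cup of coffee")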
| 478
| 0
|
'''simple docstring'''
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 133
|
"""simple docstring"""
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
lowerCAmelCase = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )
| 169
| 0
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", F"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", F"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qpos_proj.weight", F"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kpos_proj.weight", F"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.weight", F"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", F"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", F"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kpos_proj.weight", F"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.weight", F"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", F"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", F"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", F"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_qpos_proj.bias", F"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_kpos_proj.bias", F"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.bias", F"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", F"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", F"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_kpos_proj.bias", F"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.bias", F"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", F"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
A : Any = state_dict.pop(lowerCamelCase_ )
A : str = val
def snake_case__ ( lowerCamelCase_ ):
A : Optional[int] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A : Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
A : Dict = value
else:
A : Optional[int] = value
return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    # Standard COCO test image used across the conversion scripts.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # re-key under the `conditional_detr.model` prefix
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
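# Example invocation (script name and paths are illustrative, not from the original file):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50_converted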
| 700
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI checkpoint."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
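# Illustrative usage sketch (assumes the transformers agents runtime; downloads the
# facebook/bart-large-mnli checkpoint on first call):
#
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", labels=["positive", "negative"])
#   # -> "positive"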
| 423
| 0
|
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of `num`."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """
    Sum of the digits in the numerator of the `max_n`-th convergent of the
    continued fraction for e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] (Project Euler 65).
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'''{solution() = }''')
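# Illustrative sanity check (not in the original file): the Project Euler 65
# statement gives 1457 as the numerator of the 10th convergent of e, so
# solution(10) should return 1 + 4 + 5 + 7 = 17.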
| 487
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
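# Example invocation (file names are illustrative, not from the original script):
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path ./efficientformer_l1_300d.pth \
#       --config_file ./efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1-300 \
#       --no-push_to_hub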
| 487
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 330
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """
    Return the maximum product a*b*c over Pythagorean triplets (a, b, c)
    with a + b + c == n (Project Euler 9, generalised perimeter).
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'{solution() = }')
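# Illustrative check (not in the original file): for n = 12 the only Pythagorean
# triplet with a + b + c == 12 is (3, 4, 5), so solution(12) == 60.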
| 330
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)

        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 373
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
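# Illustrative usage (added, not part of the original file): with the default
# conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the feature encoder
# downsamples the waveform by 5 * 2**6 = 320 samples per output frame:
#
#   config = SEWDConfig()
#   assert config.inputs_to_logits_ratio == 320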
| 373
| 1
|
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers h_n = n * (2n - 1), starting from n = 0."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 712
|
def solution(limit: int = 1_000_000) -> int:
    """
    Return the starting number below `limit` that produces the longest
    Collatz chain (Project Euler 14), memoising chain lengths as we go.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 62
| 0
|
import os
from collections.abc import Iterator
def __UpperCAmelCase ( UpperCAmelCase = "." )-> int:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(UpperCAmelCase ):
lowercase = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(UpperCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(UpperCAmelCase, UpperCAmelCase ).lstrip('''./''' )
def __UpperCAmelCase ( UpperCAmelCase )-> Union[str, Any]:
"""simple docstring"""
return f'{i * " "}*' if i else "\n##"
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase )-> Optional[int]:
"""simple docstring"""
lowercase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(UpperCAmelCase )} {new_part.replace("_", " " ).title()}' )
return new_path
def __UpperCAmelCase ( UpperCAmelCase = "." )-> Tuple:
"""simple docstring"""
lowercase = ''''''
for filepath in sorted(good_file_paths(UpperCAmelCase ) ):
lowercase ,lowercase = os.path.split(UpperCAmelCase )
if filepath != old_path:
lowercase = print_path(UpperCAmelCase, UpperCAmelCase )
lowercase = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase = f'{filepath}/{filename}'.replace(''' ''', '''%20''' )
lowercase = os.path.splitext(filename.replace('''_''', ''' ''' ).title() )[0]
print(f'{md_prefix(UpperCAmelCase )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md(".")
| 604
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
| 400
| 0
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set"
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
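# Illustrative usage of the decorators above (test body is a placeholder):
#
#   class MyFeatureTest(unittest.TestCase):
#       @require_cuda
#       def test_runs_on_gpu(self):
#           ...
#
#       @require_torch_min_version(version="1.12.0")
#       def test_needs_recent_torch(self):
#           ...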
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its
    data at the start of a test, and then destroys it at the end of the TestCase.

    Note: use `clear_on_setup = False` to skip deleting the data stored in the temp dir.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`"
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Remove `cls.tmpdir` after the test suite has finished"
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroy all contents in `self.tmpdir`, but not `self.tmpdir` itself"
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """
    A TestCase class that resets the accelerator state at the end of every test, to avoid
    state leaking between tests.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """
    A TestCase class designed to dynamically add mocks that should be used in every test.
    """

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        """
        Adds mocks that should be started on each test and stopped on teardown. Should be called
        during `setUp`, after `super().setUp()`.
        """
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and optionally returns the `stdout`, properly
    capturing any error raised while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
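# Illustrative usage (commands are placeholders):
#   run_command(["echo", "hello"], return_stdout=True)  # -> "hello\n"
#   execute_subprocess_async(["python", "-c", "print('ok')"], env=os.environ.copy())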
| 717
|
def excel_column_to_number(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "A", "AZ") to its 1-based column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
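# Worked example (added for illustration): "AB" evaluates right-to-left as
#   (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1 = 2 + 26 = 28
# so excel_column_to_number("AB") == 28.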
if __name__ == "__main__":
from doctest import testmod
testmod()
| 652
| 0
|
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of the ``torch.distributed`` communication package. During training, all
    workers initialize their own instance of the retriever, but only the main worker loads the index into (CPU)
    memory. The retriever also works in a non-distributed setup.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 180
|
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    """Text classification pipeline using any `ModelForSequenceClassification`."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
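# Illustrative usage through the high-level factory (downloads a default
# sentiment checkpoint on first run):
#
#   from transformers import pipeline
#   classifier = pipeline("text-classification")
#   classifier("This restaurant is awesome")
#   # -> [{'label': 'POSITIVE', 'score': 0.99...}]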
| 180
| 1
|
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # split the fused qkv projection into separate query/key/value tensors
            # (target key names follow the DonutSwin/Swin convention)
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
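# Example invocation (script name is illustrative, not from the original file):
#   python convert_donut_checkpoint.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa-converted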
| 215
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        # method name assumed: the original stub's name was lost in obfuscation
        pass
@is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
@is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
@is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 215
| 1
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 82
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82
| 1
|
'''simple docstring'''
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
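# A quick round-trip sanity check for the three helpers above (a minimal sketch;
# it assumes plain uppercase input, since only A-Z and spaces are handled):
def _round_trip_ok(message: str = "HELLO WORLD", key: str = "KEY") -> bool:
    key_new = generate_key(message, key)
    return original_text(cipher_text(message, key_new), key_new) == message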
def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 411
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 411
| 1
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
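# Note: generate_fn yields deterministic example keys of the form
# "{partition_id}_{row_id}", so shuffling or sharding the resulting iterable only
# requires permuting or slicing `partition_order` instead of touching the DataFrame.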
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir

        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which would result in a
        # pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
| 583
|
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
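# Worked example for the factor formula above: with level = 170,
#   factor = (259 * (170 + 255)) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85,
# so contrast(128) stays at 128, while contrast(150) maps to int(128 + 4.85 * 22) = 234.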
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 583
| 1
|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 716
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
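# Typical invocation through python-fire (the script file name is illustrative):
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json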
| 467
| 0
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 19
|
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch an Open Library record as a JSON dict for the given olid."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Map raw Open Library fields to human-readable keys."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
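# A minimal non-interactive sketch of the two helpers above (network access is
# assumed, and the exact fields depend on the Open Library record):
#   book_summary = summarize_book(get_openlibrary_data("isbn/0140328726"))
#   print(book_summary["Title"])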
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(f"\nSearching Open Library for ISBN: {isbn}...\n")
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print("""\n""".join(f"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"Sorry, there are no results for ISBN: {isbn}.")
| 474
| 0
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
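# Worked example: sum_of_divisors(220) = 284 and sum_of_divisors(284) = 220, so both
# members of this amicable pair satisfy the chain condition used in solution() below,
# while perfect numbers are excluded by the `sum_of_divisors(i) != i` check.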
def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 703
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_2[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_4[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
| 452
| 0
|