"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = """big_bird"""
def __init__( self , lowerCAmelCase=5_03_58 , lowerCAmelCase=7_68 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=30_72 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=40_96 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=66 , lowerCAmelCase="block_sparse" , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=64 , lowerCAmelCase=3 , lowerCAmelCase=None , **lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , sep_token_id=lowerCAmelCase , **lowerCAmelCase , )
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = initializer_range
snake_case = type_vocab_size
snake_case = layer_norm_eps
snake_case = use_cache
snake_case = rescale_embeddings
snake_case = attention_type
snake_case = use_bias
snake_case = block_size
snake_case = num_random_blocks
snake_case = classifier_dropout
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
@property
def snake_case ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
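# Usage sketch (hedged): assuming this module ships inside `transformers`, the
# config round-trips like any PretrainedConfig. The values below are
# illustrative, not required defaults.
#
#     from transformers import BigBirdConfig
#
#     config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#     config.save_pretrained("./bigbird-config")                 # writes config.json
#     reloaded = BigBirdConfig.from_pretrained("./bigbird-config")
#     assert reloaded.attention_type == "block_sparse"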
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
SCREAMING_SNAKE_CASE__ = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def lowerCAmelCase__ ( _UpperCamelCase : Tuple ) -> List[Any]:
"""simple docstring"""
snake_case = test_results.split(' ' )
snake_case = 0
snake_case = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
snake_case = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(_UpperCamelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
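# Example (hedged): on a typical pytest summary line the counts precede the
# keywords and the wall time sits just before the trailing "=".
#
#     assert handle_test_results("= 2 failed, 98 passed in 373.82s =") == (2, 98, "373.82s")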
def extract_first_line_failure(failures_short_lines: str):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            # The first non-numbered line after a doctest header is the error summary.
            failures[file] = line
            in_error = False

    return failures
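# Example (hedged): given a pytest doctest failure report of the form
#
#     _____ [doctest] transformers.models.bert.modeling_bert.BertModel.forward _____
#     1024     >>> outputs = model(**inputs)
#     UNEXPECTED EXCEPTION: ...
#
# the helper maps the dotted test name (third space-separated token of the
# header line) to the first following line that does not start with a line
# number, here the "UNEXPECTED EXCEPTION: ..." summary.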
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        # Set by post(); post_reply() requires it.
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""

        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_job_links()
SCREAMING_SNAKE_CASE__ = retrieve_available_artifacts()
SCREAMING_SNAKE_CASE__ = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
SCREAMING_SNAKE_CASE__ = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
SCREAMING_SNAKE_CASE__ = github_actions_job_links.get("run_doctests")
SCREAMING_SNAKE_CASE__ = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
SCREAMING_SNAKE_CASE__ = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = handle_test_results(artifact["stats"])
SCREAMING_SNAKE_CASE__ = failed
SCREAMING_SNAKE_CASE__ = success
SCREAMING_SNAKE_CASE__ = time_spent[1:-1] + ", "
SCREAMING_SNAKE_CASE__ = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
SCREAMING_SNAKE_CASE__ = line.replace("FAILED ", "")
SCREAMING_SNAKE_CASE__ = line.split()[0].replace("\n", "")
if "::" in line:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = line.split("::")
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
SCREAMING_SNAKE_CASE__ = docs[file_regex]
doc_test_results[category]["failed"].append(test)
SCREAMING_SNAKE_CASE__ = all_failures[test] if test in all_failures else "N/A"
SCREAMING_SNAKE_CASE__ = failure
break
SCREAMING_SNAKE_CASE__ = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
"""simple docstring"""
def _a ( _snake_case = 1000 ):
"""simple docstring"""
UpperCAmelCase = -1
UpperCAmelCase = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
UpperCAmelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
UpperCAmelCase = n - a - b
if c * c == (a * a + b * b):
UpperCAmelCase = a * b * c
if candidate >= product:
UpperCAmelCase = candidate
return product
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger("""transformers.models.speecht5""")
_UpperCamelCase = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the dotted attribute path to the target module/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
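# Example (hedged): trailing ".*" behaves like a glob prefix, an embedded ".*."
# matches prefix and suffix, and anything else is a plain substring test.
#
#     assert should_ignore("text_decoder_prenet.embed_positions._float_tensor", IGNORE_KEYS)
#     assert should_ignore("speech_decoder_prenet.layers.0.weight", ["speech_decoder_prenet.*"])
#     assert not should_ignore("encoder.layers.0.fc1.weight", ["decoder.*"])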
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)

                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCamelCase = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
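# Invocation sketch (hedged; the script name and all paths below are
# placeholders, not taken from the original repository):
#
#     python convert_speecht5_checkpoint.py \
#         --task s2t \
#         --checkpoint_path ./speecht5_asr.pt \
#         --vocab_path ./spm_char.model \
#         --pytorch_dump_folder_path ./speecht5-asr-converted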
"""Import structure for the VisionEncoderDecoder model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
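# Behavior sketch (hedged): `_LazyModule` defers the heavy submodule imports,
# so importing only the config class does not pull in torch/TF/flax.
#
#     from transformers import VisionEncoderDecoderConfig   # lightweight: config module only
#     from transformers import VisionEncoderDecoderModel    # triggers the torch-backed
#                                                            # submodule import on first access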
import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
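# Running these tests (hedged; assumes a transformers source checkout with the
# flax extras installed). The @slow tests only execute when RUN_SLOW is set:
#
#     pip install "transformers[flax,testing]"
#     RUN_SLOW=1 python -m pytest tests/models/albert/test_modeling_flax_albert.py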
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class snake_case_:
def __init__( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int]=1_3 , UpperCamelCase_ : int=3_0 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : str=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=4 , UpperCamelCase_ : Union[str, Any]=3_7 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=1_0 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : Dict=None , UpperCamelCase_ : str=2 , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Any = image_size
lowerCAmelCase : int = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : Tuple = is_training
lowerCAmelCase : Tuple = use_labels
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Dict = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Tuple = intermediate_size
lowerCAmelCase : int = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : int = type_sequence_label_size
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : List[str] = scope
lowerCAmelCase : int = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
lowerCAmelCase : Optional[Any] = num_patches + 2
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Any = None
if self.use_labels:
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Any = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Dict ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] ):
lowerCAmelCase : List[str] = TFDeiTModel(config=UpperCamelCase_ )
lowerCAmelCase : str = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : int ):
lowerCAmelCase : Tuple = TFDeiTForMaskedImageModeling(config=UpperCamelCase_ )
lowerCAmelCase : Any = model(UpperCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase : int = 1
lowerCAmelCase : Union[str, Any] = TFDeiTForMaskedImageModeling(UpperCamelCase_ )
lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = self.type_sequence_label_size
lowerCAmelCase : int = TFDeiTForImageClassification(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : Union[str, Any] = TFDeiTForImageClassification(UpperCamelCase_ )
lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : List[str] = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Dict = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[int] = config_and_inputs
lowerCAmelCase : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class snake_case_( a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__UpperCamelCase = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = TFDeiTModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=3_7 )
def lowerCamelCase__ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def lowerCamelCase__ ( self : List[str] ):
pass
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[Any] = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Dense ) )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[Any] = model_class(UpperCamelCase_ )
lowerCAmelCase : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
lowerCAmelCase : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Any=False ):
lowerCAmelCase : Tuple = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowerCamelCase__ ( self : Dict ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFDeiTModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _snake_case ( ):
lowerCAmelCase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class snake_case_( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ):
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Dict = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
lowerCAmelCase : str = self.default_image_processor
lowerCAmelCase : str = prepare_img()
lowerCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' )
# forward pass
lowerCAmelCase : Union[str, Any] = model(**UpperCamelCase_ )
# verify the logits
lowerCAmelCase : List[str] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
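# Shape sanity check (hedged): with the tester defaults above, image_size=30
# and patch_size=2 give (30 // 2) ** 2 = 225 patches, so the expected sequence
# length is 225 + 2 = 227 (patches plus the [CLS] and distillation tokens).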
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase : Union[str, Any] = SwinConfig(image_size=192 )
if "base" in model_name:
lowerCAmelCase : Union[str, Any] = 6
lowerCAmelCase : Any = 128
lowerCAmelCase : List[Any] = (2, 2, 18, 2)
lowerCAmelCase : Any = (4, 8, 16, 32)
elif "large" in model_name:
lowerCAmelCase : Tuple = 12
lowerCAmelCase : Dict = 192
lowerCAmelCase : List[str] = (2, 2, 18, 2)
lowerCAmelCase : Union[str, Any] = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
lowerCAmelCase : Optional[int] = window_size
lowerCAmelCase : Any = embed_dim
lowerCAmelCase : Optional[Any] = depths
lowerCAmelCase : int = num_heads
return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
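# Example (hedged): a raw SimMIM key is mapped onto the HF Swin namespace.
#
#     rename_key("encoder.layers.0.blocks.0.mlp.fc1.weight")
#     # -> "swin.encoder.layers.0.blocks.0.intermediate.dense.weight"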
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
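# How the fused qkv split works (hedged sketch): the original checkpoint stores
# query/key/value as one (3 * dim, dim) matrix; rows [0, dim), [dim, 2 * dim)
# and [2 * dim, 3 * dim) become the separate HF query/key/value weights.
#
#     import torch
#     dim = 4
#     qkv = torch.arange(3 * dim * dim).reshape(3 * dim, dim)
#     q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#     assert torch.equal(torch.cat([q, k, v]), qkv)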
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
"""PyTests for the digital image processing algorithms."""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
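# Running this suite (hedged; the file name is assumed, and the repository root
# must be the working directory so the relative image paths resolve):
#
#     python -m pytest digital_image_processing/test_digital_image_processing.py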
"""Fast tokenizer for the NLLB models."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """Fast NLLB tokenizer, backed by HuggingFace tokenizers."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            # legacy NLLB: tokens </s> lang_code
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            # current NLLB: lang_code tokens </s>
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
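
if __name__ == "__main__":
    # Usage sketch (illustrative; assumes network access and an installed
    # `transformers` package, with the checkpoint name taken from the
    # pretrained map above).
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    inputs = tokenizer("Hello, world!", return_tensors="pt")
    # Generation is expected to force the target language as the first token:
    forced_bos_token_id = tokenizer.convert_tokens_to_ids("fra_Latn")
    print(inputs["input_ids"], forced_bos_token_id)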
| 257
| 1
|
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        """Print the list contents, space separated, on one line."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        """Prepend a new node holding `new_data` to the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        """Swap the first nodes holding `node_data_1` and `node_data_2`."""
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # Swap the payloads rather than relinking the nodes.
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
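    # A quick extra check (illustrative): the data swap works for arbitrary
    # payload types, not just ints.
    names = LinkedList()
    for name in ["carol", "bob", "alice"]:
        names.push(name)
    names.swap_nodes("alice", "carol")
    names.print_list()  # carol bob alice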
| 369
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the scheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) stochastic differential equation scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None) -> None:
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None) -> None:
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator=None, return_dict: bool = True) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator=None, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
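
if __name__ == "__main__":
    # Illustrative check (not part of the scheduler): the discrete sigma
    # ladder built in `set_sigmas` is geometric between sigma_min and
    # sigma_max, because linspace is taken in log space.
    sigma_min, sigma_max, steps = 0.01, 1348.0, 5
    ladder = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), steps))
    print(ladder)  # ~ tensor([0.01, 0.19, 3.67, 70.4, 1348.0])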
| 81
| 0
|
'''simple docstring'''
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Check primality using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are prime
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not prime
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return n together with every left- and right-truncation of its digits."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Quick filter: the leading and trailing three digits must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first `count` primes that stay prime under all truncations."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
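    # Spot check (illustrative): 3797 is the textbook truncatable prime,
    # since 3797, 797, 97, 7 and 379, 37, 3 are all prime.
    assert validate(3797)
    assert all(is_prime(i) for i in list_truncated_nums(3797))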
| 47
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""")
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" )
os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f:
f.write(json.dumps({} ) )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
try:
AutoConfig.register("""custom""" ,lowerCamelCase__ )
# Wrong model type will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoConfig.register("""model""" ,lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoConfig.register("""bert""" ,lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = "new-model"
try:
AutoConfig.register("""new-model""" ,lowerCamelCase__ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
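
if __name__ == "__main__":
    # Illustrative registration round-trip mirroring the tests above;
    # "toy-model" is a hypothetical model type, not a real Transformers model.
    from transformers import PretrainedConfig

    class ToyConfig(PretrainedConfig):
        model_type = "toy-model"

    AutoConfig.register("toy-model", ToyConfig)
    with tempfile.TemporaryDirectory() as tmp_dir:
        ToyConfig().save_pretrained(tmp_dir)
        restored = AutoConfig.from_pretrained(tmp_dir)
    assert isinstance(restored, ToyConfig)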
| 296
| 0
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _snake_case ( unittest.TestCase ):
_lowercase : List[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowercase : int = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Any:
SCREAMING_SNAKE_CASE = TextaTextGenerationPipeline(model=a , tokenizer=a)
return generator, ["Something to write", "Something else"]
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Any:
SCREAMING_SNAKE_CASE = generator('Something there')
self.assertEqual(a , [{'generated_text': ANY(a)}])
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))
SCREAMING_SNAKE_CASE = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=a)
self.assertEqual(
a , [
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
] , )
SCREAMING_SNAKE_CASE = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=a)
self.assertEqual(
a , [
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
] , )
with self.assertRaises(a):
generator(4)
@require_torch
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt')
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE = generator('Something there' , do_sample=a)
self.assertEqual(a , [{'generated_text': ''}])
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = generator(
'Something there' , num_return_sequences=a , num_beams=a , )
SCREAMING_SNAKE_CASE = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(a , a)
SCREAMING_SNAKE_CASE = generator('This is a test' , do_sample=a , num_return_sequences=2 , return_tensors=a)
self.assertEqual(
a , [
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
] , )
SCREAMING_SNAKE_CASE = generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE = '<pad>'
SCREAMING_SNAKE_CASE = generator(
['This is a test', 'This is a second test'] , do_sample=a , num_return_sequences=2 , batch_size=2 , return_tensors=a , )
self.assertEqual(
a , [
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf')
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE = generator('Something there' , do_sample=a)
self.assertEqual(a , [{'generated_text': ''}])
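
if __name__ == "__main__":
    # Illustrative standalone run of the pipeline under test (assumes network
    # access; t5-small is a small public checkpoint, not the test fixture).
    generator = pipeline("text2text-generation", model="t5-small")
    print(generator("translate English to French: How are you?"))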
| 327
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : List[Any] = logging.get_logger(__name__)
a_ : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
a_ : str = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
a_ : List[Any] = {'allegro/herbert-base-cased': 5_14}
a_ : Dict = {}
class _snake_case ( A__ ):
_lowercase : Dict = VOCAB_FILES_NAMES
_lowercase : int = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Any = PRETRAINED_INIT_CONFIGURATION
_lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Any = HerbertTokenizer
def __init__( self , a=None , a=None , a=None , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a="</s>" , **a , ) -> Dict:
super().__init__(
a , a , tokenizer_file=a , cls_token=a , unk_token=a , pad_token=a , mask_token=a , sep_token=a , **a , )
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a)
if token_ids_a is None:
return [1] + ([0] * len(a)) + [1]
return [1] + ([0] * len(a)) + [1] + ([0] * len(a)) + [1]
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> Tuple[str]:
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(a , name=a)
return tuple(a)
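
if __name__ == "__main__":
    # Illustrative layout of a sequence pair under the helpers above
    # (hypothetical ids: cls=0, sep=2; real ids come from the vocabulary).
    cls_id, sep_id = 0, 2
    ids_a, ids_b = [11, 12], [21]
    pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    special_tokens_mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]
    print(pair, token_type_ids, special_tokens_mask)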
| 327
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False ):
_UpperCAmelCase : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
for i in range(config.num_hidden_layers ):
_UpperCAmelCase : Optional[int] = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase : int = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
_UpperCAmelCase : Optional[int] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase : Any = in_proj_bias[: config.hidden_size]
_UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase : List[str] = in_proj_bias[-config.hidden_size :]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Any = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[str] = dct.pop(__lowerCAmelCase )
_UpperCAmelCase : int = val
@torch.no_grad()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Dict = False
if "vqa" in checkpoint_url:
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : Optional[Any] = 3_129
_UpperCAmelCase : int = "huggingface/label-files"
_UpperCAmelCase : int = "vqa2-id2label.json"
_UpperCAmelCase : Dict = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : Tuple = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase : Optional[Any] = idalabel
_UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
_UpperCAmelCase : int = ViltForQuestionAnswering(__lowerCAmelCase )
elif "nlvr" in checkpoint_url:
_UpperCAmelCase : Any = True
_UpperCAmelCase : int = 2
_UpperCAmelCase : Union[str, Any] = {0: "False", 1: "True"}
_UpperCAmelCase : Union[str, Any] = {v: k for k, v in config.idalabel.items()}
_UpperCAmelCase : int = 3
_UpperCAmelCase : Optional[int] = ViltForImagesAndTextClassification(__lowerCAmelCase )
elif "irtr" in checkpoint_url:
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : Union[str, Any] = ViltForImageAndTextRetrieval(__lowerCAmelCase )
elif "mlm_itm" in checkpoint_url:
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Optional[Any] = ViltForMaskedLM(__lowerCAmelCase )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase : str = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="cpu" )["state_dict"]
_UpperCAmelCase : Dict = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
if mlm_model or irtr_model:
_UpperCAmelCase : Optional[int] = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__lowerCAmelCase )
# Define processor
_UpperCAmelCase : Union[str, Any] = ViltImageProcessor(size=384 )
_UpperCAmelCase : Tuple = BertTokenizer.from_pretrained("bert-base-uncased" )
_UpperCAmelCase : Optional[int] = ViltProcessor(__lowerCAmelCase , __lowerCAmelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_UpperCAmelCase : int = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=__lowerCAmelCase ).raw )
_UpperCAmelCase : Optional[Any] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=__lowerCAmelCase ).raw )
_UpperCAmelCase : List[str] = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
_UpperCAmelCase : Optional[Any] = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors="pt" )
_UpperCAmelCase : Optional[Any] = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors="pt" )
_UpperCAmelCase : List[Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_UpperCAmelCase : int = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=__lowerCAmelCase ).raw )
if mlm_model:
_UpperCAmelCase : Optional[Any] = "a bunch of [MASK] laying on a [MASK]."
else:
_UpperCAmelCase : Optional[int] = "How many cats are there?"
_UpperCAmelCase : Any = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors="pt" )
_UpperCAmelCase : List[Any] = model(**__lowerCAmelCase )
# Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30_522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3_129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        # logits are 2-D here, so a single slice check suffices (the stray
        # `logits[0, 0, :3]` assert in the original would raise an IndexError)
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :2], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCamelCase__ = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
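    # Illustrative check (added for exposition, not part of the conversion) of
    # the qkv split that read_in_q_k_v performs, using a toy hidden size:
    toy_hidden = 4
    toy_qkv_weight = torch.randn(3 * toy_hidden, toy_hidden)  # fused qkv matrix
    toy_q = toy_qkv_weight[:toy_hidden, :]
    toy_k = toy_qkv_weight[toy_hidden : 2 * toy_hidden, :]
    toy_v = toy_qkv_weight[-toy_hidden:, :]
    assert torch.equal(torch.cat([toy_q, toy_k, toy_v]), toy_qkv_weight)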
| 234
|
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 234
| 1
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _A ( _lowerCAmelCase ):
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Test helper: wraps a module and adds a trainable low-rank adapter."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
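
def _lora_layer_demo():
    # Illustrative only (not part of the test suite): wrap a frozen Linear
    # with the adapter above; rank 4 is an arbitrary choice for the sketch.
    base = nn.Linear(16, 16)
    for p in base.parameters():
        p.requires_grad_(False)  # freeze the base weights; only the adapter trains
    wrapped = LoRALayer(base, rank=4)
    print(wrapped(torch.randn(2, 16)).shape)  # torch.Size([2, 16])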
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = """bigscience/bloom-1b7"""
# Constant values
lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74
lowerCAmelCase__ = """Hello my name is"""
lowerCAmelCase__ = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
lowerCAmelCase__ = 10
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(self.model_name)
class _UpperCamelCase ( A ):
'''simple docstring'''
def __lowerCamelCase ( self : Any):
'''simple docstring'''
super().setUp()
# Models and tokenizer
__lowercase =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto')
__lowercase =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCAmelCase , device_map='auto')
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =self.model_abit.config
self.assertTrue(hasattr(_lowerCAmelCase , 'quantization_config'))
__lowercase =config.to_dict()
__lowercase =config.to_diff_dict()
__lowercase =config.to_json_string()
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__lowercase =self.model_fpaa.get_memory_footprint()
__lowercase =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
__lowercase =get_some_linear_layer(self.model_abit)
self.assertTrue(linear.weight.__class__ == Paramsabit)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_lowerCAmelCase , torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta)
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =self.tokenizer(self.input_text , return_tensors='pt')
__lowercase =self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_lowerCAmelCase) , self.EXPECTED_OUTPUTS)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =BitsAndBytesConfig()
__lowercase =True
__lowercase =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_lowerCAmelCase , device_map='auto')
__lowercase =self.tokenizer(self.input_text , return_tensors='pt')
__lowercase =model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_lowerCAmelCase) , self.EXPECTED_OUTPUTS)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
with self.assertRaises(_lowerCAmelCase), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_lowerCAmelCase)
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =BitsAndBytesConfig()
with self.assertRaises(_lowerCAmelCase):
__lowercase =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_lowerCAmelCase , load_in_abit=_lowerCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
with self.assertRaises(_lowerCAmelCase):
# Tries with `str`
self.model_abit.to('cpu')
with self.assertRaises(_lowerCAmelCase):
# Tries with a `dtype``
self.model_abit.to(torch.floataa)
with self.assertRaises(_lowerCAmelCase):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0'))
with self.assertRaises(_lowerCAmelCase):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_lowerCAmelCase):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase =self.tokenizer(self.input_text , return_tensors='pt')
__lowercase =self.model_fpaa.to(torch.floataa)
__lowercase =self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=1_0)
# Check this does not throw an error
__lowercase =self.model_fpaa.to('cpu')
# Check this does not throw an error
__lowercase =self.model_fpaa.half()
# Check this does not throw an error
__lowercase =self.model_fpaa.float()
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_lowerCAmelCase , device_map='auto')
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __lowerCamelCase ( cls : Any):
'''simple docstring'''
__lowercase ='t5-small'
__lowercase ='google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase =AutoTokenizer.from_pretrained(cls.model_name)
__lowercase ='Translate in German: Hello, my dog is cute'
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : str):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__lowercase =TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase =None
# test with `t5-small`
__lowercase =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_lowerCAmelCase , device_map='auto')
__lowercase =self.tokenizer(self.input_text , return_tensors='pt').to(0)
__lowercase =model.generate(**_lowerCAmelCase)
# test with `flan-t5-small`
__lowercase =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_lowerCAmelCase , device_map='auto')
__lowercase =self.tokenizer(self.input_text , return_tensors='pt').to(0)
__lowercase =model.generate(**_lowerCAmelCase)
__lowercase =modules
def __lowerCamelCase ( self : int):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_lowerCAmelCase , device_map='auto')
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
__lowercase =self.tokenizer(self.input_text , return_tensors='pt').to(0)
__lowercase =model.generate(**_lowerCAmelCase)
# test with `flan-t5-small`
__lowercase =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_lowerCAmelCase , device_map='auto')
__lowercase =self.tokenizer(self.input_text , return_tensors='pt').to(0)
__lowercase =model.generate(**_lowerCAmelCase)
class _UpperCamelCase ( A ):
'''simple docstring'''
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
super().setUp()
# model_name
__lowercase ='bigscience/bloom-560m'
__lowercase ='t5-small'
# Different types of model
__lowercase =AutoModel.from_pretrained(self.model_name , load_in_abit=_lowerCAmelCase , device_map='auto')
# Sequence classification model
__lowercase =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_lowerCAmelCase , device_map='auto')
# CausalLM model
__lowercase =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCAmelCase , device_map='auto')
# Seq2seq model
__lowercase =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_lowerCAmelCase , device_map='auto')
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _UpperCamelCase ( A ):
'''simple docstring'''
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
super().setUp()
def __lowerCamelCase ( self : Any):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase =pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase =self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _UpperCamelCase ( A ):
'''simple docstring'''
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
super().setUp()
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_lowerCAmelCase , device_map='balanced')
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
# Check that inference pass works on the model
__lowercase =self.tokenizer(self.input_text , return_tensors='pt')
# Second real batch
__lowercase =model_parallel.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_lowerCAmelCase) , self.EXPECTED_OUTPUTS)
class _UpperCamelCase ( A ):
'''simple docstring'''
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase ='facebook/opt-350m'
super().setUp()
def __lowerCamelCase ( self : int):
'''simple docstring'''
if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'):
return
# Step 1: freeze all parameters
__lowercase =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCAmelCase)
self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
for param in model.parameters():
__lowercase =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase =param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_lowerCAmelCase)):
__lowercase =LoRALayer(module.q_proj , rank=1_6)
__lowercase =LoRALayer(module.k_proj , rank=1_6)
__lowercase =LoRALayer(module.v_proj , rank=1_6)
# Step 3: dummy batch
__lowercase =self.tokenizer('Test batch ' , return_tensors='pt').to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase =model.forward(**_lowerCAmelCase)
out.logits.norm().backward()
for module in model.modules():
if isinstance(_lowerCAmelCase , _lowerCAmelCase):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(_lowerCAmelCase , nn.Embedding):
self.assertTrue(module.weight.grad is None)
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """gpt2-xl"""
lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
| 48
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = ["""image_processor""", """tokenizer"""]
lowerCAmelCase__ = """BridgeTowerImageProcessor"""
lowerCAmelCase__ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any]):
'''simple docstring'''
super().__init__(_lowerCAmelCase , _lowerCAmelCase)
def __call__( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : int = 0 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , **_lowerCAmelCase : Optional[Any] , ):
'''simple docstring'''
__lowercase =self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
# add pixel_values + pixel_mask
__lowercase =self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , do_normalize=_lowerCAmelCase , do_center_crop=_lowerCAmelCase , **_lowerCAmelCase)
encoding.update(_lowerCAmelCase)
return encoding
def __lowerCamelCase ( self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : str):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : Optional[Any] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase)
@property
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =self.tokenizer.model_input_names
__lowercase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
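
if __name__ == "__main__":
    # Illustrative use of the processor (assumes network access and the public
    # BridgeTower checkpoint name; both are assumptions, not part of this file).
    import requests
    from PIL import Image
    from transformers import BridgeTowerProcessor

    processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    encoding = processor(image, "two cats sleeping on a couch", return_tensors="pt")
    print(sorted(encoding.keys()))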
| 48
| 1
|
def get_data(source_data):
    """Transpose row-wise source data into one list per column."""
    data_lists = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists, weights):
    """Min-max normalize each column; weight 0 inverts the score (lower is
    better), weight 1 keeps it (higher is better)."""
    score_lists = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score = []
        # for weight 0, the score is 1 - the normalized value
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight is neither 0 nor 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists):
    """Sum the per-column scores for each row."""
    final_scores = [0 for _ in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data, weights):
    """Append a weighted proximity score to every row of `source_data`."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
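
if __name__ == "__main__":
    # Illustrative run on hypothetical data: rows are candidates, columns are
    # [price, mileage, comfort]; weight 0 = lower is better, 1 = higher is better.
    vehicles = [[20.0, 60.0, 2.0], [23.0, 90.0, 3.0], [22.0, 50.0, 2.0]]
    print(procentual_proximity(vehicles, [0, 0, 1]))
    # each row gains a final column with its aggregate score (1.75, 1.0, 1.33...)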
| 314
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    """Guess the RAG model type from the checkpoint name."""
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Score the prediction against each ground truth and keep the best."""
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    """Exact-match and F1 of predictions against (possibly multiple) gold answers."""
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths)

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f"F1: {fa:.2f}")
    logger.info(f"EM: {em:.2f}")
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = args.k
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for hypo, reference in zip(_A , _A ):
SCREAMING_SNAKE_CASE__ = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE__ = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
def strip_title(_A ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[:-1]
return title
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.rag.question_encoder(_A )
SCREAMING_SNAKE_CASE__ = question_enc_outputs[0]
SCREAMING_SNAKE_CASE__ = rag_model.retriever(
_A , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE__ = []
for docs in all_docs:
SCREAMING_SNAKE_CASE__ = [strip_title(_A ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(_A ) )
return provenance_strings
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A )
SCREAMING_SNAKE_CASE__ = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE__ = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.generate( # rag_model overwrites generate
_A , attention_mask=_A , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_A , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.generator_tokenizer.batch_decode(_A , skip_special_tokens=_A )
if args.print_predictions:
for q, a in zip(_A , _A ):
logger.info('''Q: {} - A: {}'''.format(_A , _A ) )
return answers
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_A , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_A , choices=['''exact''', '''compressed''', '''legacy'''] , type=_A , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_A , type=_A , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_A , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_A , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_A , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_A , type=_A , required=_A , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_A , type=_A , required=_A , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_A , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_A , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_A , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_A , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_A , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=_A , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE__ = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE__ = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE__ = args.index_path
else:
SCREAMING_SNAKE_CASE__ = BartForConditionalGeneration
SCREAMING_SNAKE_CASE__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _A )
SCREAMING_SNAKE_CASE__ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE__ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_A , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_A ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE__ = RagRetriever.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , retriever=_A , **_A )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , **_A )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE__ = []
for line in tqdm(_A ):
questions.append(line.strip() )
if len(_A ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE__ = []
if len(_A ) > 0:
SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A )
preds_file.write('''\n'''.join(_A ) )
preds_file.flush()
score_fn(_A , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = get_args()
main(args)
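# Example invocation of the evaluation script above (paths and model id are
# illustrative, not taken from the source):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/e2e_preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --n_docs 5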
"""Pack bitext examples into longer sequences of up to max_tokens tokens, without splitting examples."""

import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
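# Example invocation (arguments illustrative): greedily packs train bitext into
# sequences of at most --max_seq_len tokens and copies val/test through unchanged.
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --data_dir path/to/data --save_path path/to/packed --max_seq_len 512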
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Handles arguments for zero-shot text classification by turning each candidate label into an NLI
    premise/hypothesis pair."""

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
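# A minimal usage sketch of the pipeline defined above (model id illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier(
#       "I love hiking in the Alps",
#       candidate_labels=["travel", "cooking", "politics"],
#   )
#   # -> {"sequence": ..., "labels": [...], "scores": [...]}, sorted by score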
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
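# A standalone sketch (not part of the original file) of the entropy criterion
# that drives early exiting: softmax the exit logits, then take -sum(p * log p).
# A low-entropy (confident) highway exit lets inference stop before the final layer.
import torch


def _logits_entropy_sketch(logits):
    # logits: torch.Tensor of shape (batch, num_labels)
    p = torch.softmax(logits, dim=-1)
    return -(p * torch.log(p)).sum(dim=-1)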
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
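# With the lazy module installed above, a plain import such as
#
#   from transformers.models.nllb import NllbTokenizer
#
# only materializes the tokenization submodule on first attribute access,
# keeping `import transformers` fast when sentencepiece is not needed.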
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
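# A minimal usage sketch of the processor exercised by these tests (model id
# illustrative): it tokenizes the prompt twice (once for the language model,
# once for the Q-Former) and preprocesses the image in a single call.
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#   # keys: input_ids, attention_mask, qformer_input_ids, qformer_attention_mask, pixel_values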
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
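# Minimal usage of the pipeline under test, mirroring test_small_model_pt:
#
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   generator("Something there", do_sample=False)
#   # -> [{"generated_text": ""}] for this tiny random checkpoint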
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number N."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with"
" `nn.DataParallel`"
) )
def UpperCamelCase_ ( self : str ):
pass
def UpperCamelCase_ ( self : List[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self : Dict ):
return
def UpperCamelCase_ ( self : Any ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_A )
@unittest.skip("Swin does not use inputs_embeds" )
def UpperCamelCase_ ( self : str ):
pass
@unittest.skip("Swin does not support feedforward chunking" )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
__A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
__A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A ,nn.Linear ) )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(_A )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn\'t support output_attentions" )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def UpperCamelCase_ ( self : List[str] ):
pass
def UpperCamelCase_ ( self : Tuple ,A : Optional[Any] ,A : int ,A : Tuple ,A : List[str] ):
__A = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(_A ,_A ) )
__A = outputs.hidden_states
__A = getattr(
self.model_tester ,"expected_num_hidden_layers" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_A ) ,_A )
# Swin has a different seq_length
__A = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs_for_common()
__A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__A = True
self.check_hidden_states_output(_A ,_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
self.check_hidden_states_output(_A ,_A ,_A ,_A )
def UpperCamelCase_ ( self : str ):
__A = self.model_tester.prepare_config_and_inputs_for_common()
__A = 3
__A = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__A = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__A = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__A = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__A = True
self.check_hidden_states_output(_A ,_A ,_A ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
self.check_hidden_states_output(_A ,_A ,_A ,(padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn\'t have pretrained checkpoints" )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(A : Tuple ):
__A = 0
return t
def check_equivalence(A : Tuple ,A : int ,A : Tuple ,A : List[str]={} ):
with torch.no_grad():
__A = model(**_A ,return_dict=_A ,**_A )
__A = model(**_A ,return_dict=_A ,**_A ).to_tuple()
def recursive_check(A : Optional[Any] ,A : str ):
if isinstance(_A ,(List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_A ,_A ):
recursive_check(_A ,_A )
elif isinstance(_A ,_A ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() ,dict_object.values() ):
recursive_check(_A ,_A )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_A ) ,set_nan_tensor_to_zero(_A ) ,atol=1E-5 ) ,msg=(
"Tuple and dict output are not equal. Difference:"
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_A ).any()} and `inf`: {torch.isinf(_A )}. Dict has'''
f''' `nan`: {torch.isnan(_A ).any()} and `inf`: {torch.isinf(_A )}.'''
) ,)
recursive_check(_A ,_A )
for model_class in self.all_model_classes:
__A = model_class(_A )
model.to(_A )
model.eval()
__A = self._prepare_for_class(_A ,_A )
__A = self._prepare_for_class(_A ,_A )
check_equivalence(_A ,_A ,_A )
__A = self._prepare_for_class(_A ,_A ,return_labels=_A )
__A = self._prepare_for_class(_A ,_A ,return_labels=_A )
check_equivalence(_A ,_A ,_A )
__A = self._prepare_for_class(_A ,_A )
__A = self._prepare_for_class(_A ,_A )
check_equivalence(_A ,_A ,_A ,{"output_hidden_states": True} )
__A = self._prepare_for_class(_A ,_A ,return_labels=_A )
__A = self._prepare_for_class(_A ,_A ,return_labels=_A )
check_equivalence(_A ,_A ,_A ,{"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version) -> None:
    """simple docstring"""
    # `version` (from `packaging`) and `__version__` are imported at the top of this module.
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 124
| 0
|
def binomial_coefficient(n, r) -> int:
    """Compute C(n, r) with a single rolling Pascal's-triangle row in O(n * r)."""
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
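# Sanity check (assumption: Python >= 3.8, where math.comb is available): the
# rolling-row result should agree with the standard library for every (n, r) pair.
import math

for n_check in range(11):
    for r_check in range(n_check + 1):
        assert binomial_coefficient(n_check, r_check) == math.comb(n_check, r_check), (n_check, r_check)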
| 48
|
def trapezoidal_rule(boundary, steps) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x <= (b - h):  # a strict `<` here would drop the last interior point and skew the sum
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
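# Quick convergence sketch (assumption: f(x) = x**2 on [0, 1], exact integral 1/3).
# Doubling the step count should roughly quarter the trapezoidal error:
#
#     for steps in (10.0, 20.0, 40.0):
#         approx = trapezoidal_rule([0.0, 1.0], steps)
#         print(f"steps={steps:>5} approx={approx:.6f} error={abs(approx - 1 / 3):.2e}")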
| 48
| 1
|
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    """simple docstring"""
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    """simple docstring"""
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=lowercase_ , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=lowercase_ , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )
        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)
        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")
        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , '''w''' ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")
            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
        os.rmdir(directory)
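# Sketch of the marker format that replace_in_files() consumes (a hypothetical
# to_replace_*.py file; the file path and anchor line below are illustrative only):
#
#     # To replace in: "src/transformers/models/auto/configuration_auto.py"
#     # Below: "    (\"albert\", \"AlbertConfig\"),"
#     ("mynewmodel", "MyNewModelConfig"),
#     # End.
#
# Each block copies the lines between "# Below:" and "# End." directly under the
# quoted anchor line inside the quoted target file, via the replace() helper above.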
| 355
|
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
_SCREAMING_SNAKE_CASE : List[Any] = """1"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """0"""
_SCREAMING_SNAKE_CASE : List[str] = """1"""
_SCREAMING_SNAKE_CASE : Optional[int] = ort.SessionOptions()
_SCREAMING_SNAKE_CASE : Tuple = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
_SCREAMING_SNAKE_CASE : int = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
_SCREAMING_SNAKE_CASE : Dict = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
_SCREAMING_SNAKE_CASE : Optional[int] = ort.RunOptions()
_SCREAMING_SNAKE_CASE : Tuple = 1_2_8
_SCREAMING_SNAKE_CASE : List[Any] = 1
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.ones((batch, sequence), dtype=np.intaa)
_SCREAMING_SNAKE_CASE : str = np.ones((batch, sequence), dtype=np.intaa)
_SCREAMING_SNAKE_CASE : int = np.ones((batch, sequence), dtype=np.intaa)
print("""Warm up phase...""")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Start inference...""")
_SCREAMING_SNAKE_CASE : Any = time.time()
_SCREAMING_SNAKE_CASE : str = 2_0_0_0
_SCREAMING_SNAKE_CASE : List[Any] = {}
for iter in range(max_iters):
_SCREAMING_SNAKE_CASE : Any = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1_0_0_0 / max_iters))
| 157
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class UpperCamelCase__ :
"""simple docstring"""
def _UpperCamelCase ( self , _A , _A ) -> Optional[int]:
pass
def _UpperCamelCase ( self ) -> str:
pass
def _UpperCamelCase ( self ) -> Tuple:
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def _UpperCamelCase ( self , _A , _A , _A , _A , _A=None , **_A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_A )
SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A=None , **_A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(_A , _A )
SCREAMING_SNAKE_CASE_ = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A=None , **_A ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(_A , _A )
SCREAMING_SNAKE_CASE_ = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
SCREAMING_SNAKE_CASE_ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
SCREAMING_SNAKE_CASE_ = after_output[0]
SCREAMING_SNAKE_CASE_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-3 )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A=None , **_A ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(_A , _A )
SCREAMING_SNAKE_CASE_ = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
SCREAMING_SNAKE_CASE_ = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
SCREAMING_SNAKE_CASE_ = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
SCREAMING_SNAKE_CASE_ = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)
        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)
        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)
def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel(_A )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_A )
SCREAMING_SNAKE_CASE_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A )
SCREAMING_SNAKE_CASE_ = fx_state
self.check_pt_flax_equivalence(_A , _A , _A )
def _UpperCamelCase ( self , _A , _A , _A ) -> Dict:
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel(_A )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_A )
SCREAMING_SNAKE_CASE_ = load_flax_weights_in_pytorch_model(_A , fx_model.params )
self.check_pt_flax_equivalence(_A , _A , _A )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_A )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_save_load(**_A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_A )
@is_pt_flax_cross_test
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = config_inputs_dict.pop('''vision_config''' )
SCREAMING_SNAKE_CASE_ = config_inputs_dict.pop('''text_config''' )
SCREAMING_SNAKE_CASE_ = config_inputs_dict
self.check_equivalence_pt_to_flax(_A , _A , _A )
self.check_equivalence_flax_to_pt(_A , _A , _A )
@slow
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.get_pretrained_model_and_inputs()
SCREAMING_SNAKE_CASE_ = model_a(**_A )
SCREAMING_SNAKE_CASE_ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = model_a(**_A )
SCREAMING_SNAKE_CASE_ = after_outputs[0]
SCREAMING_SNAKE_CASE_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
@require_flax
class UpperCamelCase__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_A , text_from_pt=_A , )
SCREAMING_SNAKE_CASE_ = 13
SCREAMING_SNAKE_CASE_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE_ = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _UpperCamelCase ( self , _A , _A ) -> Tuple:
SCREAMING_SNAKE_CASE_ = FlaxViTModel(_A )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(_A )
return vision_model, text_model
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = FlaxViTModelTester(self )
SCREAMING_SNAKE_CASE_ = FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE_ = vit_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = vision_config_and_inputs
SCREAMING_SNAKE_CASE_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class UpperCamelCase__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_A , text_from_pt=_A , )
SCREAMING_SNAKE_CASE_ = 13
SCREAMING_SNAKE_CASE_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE_ = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _UpperCamelCase ( self , _A , _A ) -> Any:
SCREAMING_SNAKE_CASE_ = FlaxCLIPVisionModel(_A )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(_A )
return vision_model, text_model
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = FlaxCLIPVisionModelTester(self )
SCREAMING_SNAKE_CASE_ = FlaxBertModelTester(self )
SCREAMING_SNAKE_CASE_ = clip_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = bert_model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = vision_config_and_inputs
SCREAMING_SNAKE_CASE_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE_ = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=_A , padding=_A , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = model(**_A )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
SCREAMING_SNAKE_CASE_ = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , _A , atol=1E-3 ) )
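# A short follow-on sketch (not part of the test itself): turning the verified
# logits into per-image caption probabilities with a plain numpy softmax.
def softmax(logits, axis=-1):
    z = logits - logits.max(axis=axis, keepdims=True)  # stabilize before exponentiating
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

# probs = softmax(outputs.logits_per_image)  # shape (num_images, num_texts)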
| 299
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class A_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Tuple , UpperCAmelCase : float , UpperCAmelCase : Callable , UpperCAmelCase : int , UpperCAmelCase : float = 1.0 , UpperCAmelCase : str = None , ) -> Union[str, Any]:
super().__init__()
__lowerCAmelCase: Optional[Any] = initial_learning_rate
__lowerCAmelCase: str = warmup_steps
__lowerCAmelCase: Optional[int] = power
__lowerCAmelCase: str = decay_schedule_fn
__lowerCAmelCase: Tuple = name
    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )
def UpperCAmelCase ( self : Tuple ) -> int:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    """simple docstring"""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
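# Minimal usage sketch (the hyper-parameter values are illustrative, not from this
# file): a schedule that warms up for 10% of training, then decays to zero.
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01
#     )
#     model.compile(optimizer=optimizer)  # any tf.keras model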
class A_ ( snake_case__ ):
    def __init__(self, learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7, amsgrad: bool = False, weight_decay_rate: float = 0.0, include_in_weight_decay: Optional[List[str]] = None, exclude_from_weight_decay: Optional[List[str]] = None, name: str = "AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
def UpperCAmelCase ( cls : str , UpperCAmelCase : Tuple ) -> Optional[int]:
__lowerCAmelCase: Union[str, Any] = {'WarmUp': WarmUp}
return super(UpperCAmelCase , cls ).from_config(UpperCAmelCase , custom_objects=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
super(UpperCAmelCase , self )._prepare_local(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def UpperCAmelCase ( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> List[str]:
__lowerCAmelCase: Dict = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
def UpperCAmelCase ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ) -> str:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
__lowerCAmelCase: Dict = apply_state or {}
__lowerCAmelCase: Union[str, Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
__lowerCAmelCase: str = self._fallback_apply_state(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Tuple = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any]=None ) -> List[Any]:
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_dense(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[Any]=None ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase: Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
__lowerCAmelCase: str = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_sparse(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase: List[str] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return False
return True
class A_ ( snake_case__ ):
    def __init__(self):
        self._gradients = []
        self._accum_steps = None
@property
def UpperCAmelCase ( self : Dict ) -> List[Any]:
if self._accum_steps is None:
__lowerCAmelCase: List[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : Optional[Any] , UpperCAmelCase : Any ) -> Any:
if not self._gradients:
__lowerCAmelCase: Any = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCAmelCase ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCAmelCase ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCAmelCase )}''' )
for accum_gradient, gradient in zip(self._gradients , UpperCAmelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCAmelCase )
self._accum_steps.assign_add(1 )
def UpperCAmelCase ( self : int ) -> int:
if not self._gradients:
return
self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
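# Usage sketch (assumptions: the original method names step/gradients/reset, a model
# with trainable variables, and a hypothetical compute_loss helper): accumulate one
# gradient per micro-batch, apply once, then clear the buffers.
#
#     accumulator = GradientAccumulator()
#     for micro_batch in micro_batches:
#         with tf.GradientTape() as tape:
#             loss = compute_loss(model, micro_batch)
#         accumulator(tape.gradient(loss, model.trainable_variables))
#     optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#     accumulator.reset()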
| 322
| 0
|
"""simple docstring"""
import functools
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(UpperCAmelCase_ ) != 3 or not all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(UpperCAmelCase_ ) == 0:
return 0
if min(UpperCAmelCase_ ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(UpperCAmelCase_ ) >= 366:
raise ValueError("All days elements should be less than 366" )
_lowerCamelCase : Optional[Any] = set(UpperCAmelCase_ )
@functools.cache
def dynamic_programming(_lowerCamelCase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
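# Worked example (the classic LeetCode 983 case, used here only as an illustration):
# with travel days [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15] (1-day/7-day/30-day),
# the optimum is a 1-day pass on day 1, a 7-day pass covering days 4-8, and a 1-day
# pass on day 20, for a total of 2 + 7 + 2 = 11.
#
#     assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11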
| 357
|
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__(
        self,
        dataset_name: str,
        cache_dir: str,
        version: Union[Version, str],
        config: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self._dummy_file is None:
_lowerCamelCase : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" ,self.config.name ,self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" ,self.version_name )
@property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
    def download_dummy_data(self):
        path = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
@property
def _lowercase ( self: List[str] ):
'''simple docstring'''
if self._bucket_url is None:
_lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
return self._bucket_url
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,(list, tuple) ):
return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase )
else:
return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return path
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return {}
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
for single_url in single_urls:
download_callback(__lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = single_urls
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls]
else:
_lowerCamelCase : Union[str, Any] = single_urls
_lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) )
_lowerCamelCase : List[Any] = value
# make sure that values are unique
if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url )
_lowerCamelCase : Optional[Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(__lowerCAmelCase )
return dummy_data_list
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
def _iter_archive_members(__lowerCAmelCase: Any ):
# this preserves the order of the members inside the ZIP archive
_lowerCamelCase : Tuple = Path(self.dummy_file ).parent
_lowerCamelCase : str = path.relative_to(__lowerCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCamelCase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase )
_lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [paths]
for path in paths:
if os.path.isfile(__lowerCAmelCase ):
if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ):
if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(__lowerCAmelCase ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
| 340
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
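# Instantiation sketch (a check, not part of the config module): with the defaults
# above, embed_dim=64 and 4 stages give a final channel dimension of 64 * 2**3 = 512.
#
#     config = NatConfig()
#     assert config.hidden_size == 512
#     assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]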
| 5
|
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    """simple docstring"""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs,
        )

    def read(self):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
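# Usage sketch (the file name is illustrative): load a plain-text file into a
# Dataset, one example per line under the "text" column.
#
#     ds = TextDatasetReader("corpus.txt").read()
#     print(ds[0]["text"])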
| 323
| 0
|
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
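# Why two versions: evaluate_poly recomputes x**i from scratch for every term,
# while Horner's rule folds the polynomial into n multiply-adds. A quick agreement
# check (illustrative values only):
#
#     assert abs(evaluate_poly((1.0, 2.0, 3.0), 2.0) - horner((1.0, 2.0, 3.0), 2.0)) < 1e-9  # both 17.0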
| 141
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ : Optional[Any] = 16
A_ : Optional[int] = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A__ : Dict = args.resume_from_checkpoint.split("""epoch_""" )[1]
A__ : int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A__ : Any = int(lowercase_ ) + 1
A__ : Any = evaluation_loop(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.print("""resumed checkpoint performance:""" , lowercase_ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
A__ : int = json.load(lowercase_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A__ : Optional[Any] = {}
for epoch in range(lowercase_ , lowercase_ ):
model.train()
for step, batch in enumerate(lowercase_ ):
A__ : int = model(**lowercase_ )
A__ : int = outputs.loss
A__ : int = loss / gradient_accumulation_steps
accelerator.backward(lowercase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A__ : Any = f"""epoch_{epoch}"""
A__ : int = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
A__ : List[Any] = evaluation_loop(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
A__ : Tuple = accuracy
A__ : Optional[Any] = lr_scheduler.get_lr()[0]
A__ : Tuple = optimizer.param_groups[0]["""lr"""]
A__ : int = epoch
A__ : int = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
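# Illustration (not part of the original script; all values are assumed): the
# resume logic above keys off checkpoint folders named "epoch_<n>" and restarts
# training at epoch n + 1.
resume_path = "outputs/epoch_7"  # hypothetical checkpoint folder
epoch_string = resume_path.split("epoch_")[1]
state_epoch_num = ""
for char in epoch_string:
    if char.isdigit():
        state_epoch_num += char
    else:
        break
assert int(state_epoch_num) + 1 == 8  # training would resume at epoch 8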
'''simple docstring'''
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Finds the rank of a matrix via Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
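# Quick sanity check (illustrative, not from the original module): the third
# row below is the sum of the first two, so the expected rank is 2.
example = [
    [1.0, 2.0, 3.0],
    [4.0, 5.0, 6.0],
    [5.0, 7.0, 9.0],
]
assert rank_of_matrix(example) == 2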
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    """Constructs a FLAVA processor which wraps an image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
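# Usage sketch (assumed example; requires network access to download the
# public "facebook/flava-full" checkpoint):
#
#     from PIL import Image
#     import requests
#
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     inputs = processor(images=image, text=["two cats"], return_tensors="pt", padding=True)
#     # inputs contains input_ids, attention_mask and pixel_values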
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
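# A minimal re-implementation sketch of the lazy-import pattern used above
# (assumed semantics, for illustration only): attribute access triggers the
# real import, so importing the package stays cheap until a symbol is used.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._name_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value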
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPT2TokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
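# Standalone illustration of the Trie behavior asserted above: added tokens
# split a string at their boundaries, with the longest match winning.
#
#     from transformers.tokenization_utils import Trie
#
#     trie = Trie()
#     trie.add("[CLS]")
#     trie.add("extra_id_100")
#     trie.split("[CLS] This is a extra_id_100")
#     # -> ['[CLS]', ' This is a ', 'extra_id_100']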
"""simple docstring"""
def binary_count_setbits(a: int) -> int:
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
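# Worked example (illustrative): 25 is 0b11001, so three bits are set.
assert binary_count_setbits(25) == 3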
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )
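# The Metropolis acceptance rule used above, in isolation (illustrative): a
# worse neighbor (change < 0 when maximizing) is still accepted with
# probability e^(change / T), so higher temperatures accept more downhill moves.
def _acceptance_probability(change: float, current_temp: float) -> float:
    return math.e ** (change / current_temp)


assert _acceptance_probability(-1.0, 100.0) > _acceptance_probability(-1.0, 1.0)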
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of a given number n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(F'{solution() = }')
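# Worked example (illustrative): 13195 = 5 * 7 * 13 * 29, so the largest
# prime factor is 29.
assert solution(13195) == 29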
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.')
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }), codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"], reference_urls=[
                "https://github.com/m-popovic/chrF",
            ], )

    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
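# The metric's own documented usage (Example 1 from the docstring above):
#
#     import datasets
#     chrf = datasets.load_metric("chrf")
#     prediction = ["The relationship between cats and dogs is not exactly friendly.",
#                   "a good bookshop is just a genteel black hole that knows how to read."]
#     reference = [["The relationship between dogs and cats is not exactly friendly."],
#                  ["A good bookshop is just a genteel Black Hole that knows how to read."]]
#     chrf.compute(predictions=prediction, references=reference)
#     # {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}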
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
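# Usage sketch (assumed example; requires downloading the public
# "Salesforce/blip2-opt-2.7b" checkpoint and a PIL image named `image`):
#
#     from transformers import Blip2Processor
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="Question: how many cats are there? Answer:",
#                        return_tensors="pt")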
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase_ =logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
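# Minimal usage sketch (illustrative): instantiate the config with defaults
# and override the pruning hyper-parameters.
config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
assert config.model_type == "masked_bert"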
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
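# Both implementations agree (illustrative check): 37 is 0b100101, so three
# bits are set.
assert get_set_bits_count_using_modulo_operator(37) == 3
assert get_set_bits_count_using_brian_kernighans_algorithm(37) == 3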
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing order of degree) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method, using O(n) multiplications."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
UpperCAmelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
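# Hand-checked example (illustrative): for poly = (0.0, 0.0, 5.0, 9.3, 7.0)
# and x = 10, 5*10^2 + 9.3*10^3 + 7*10^4 = 500 + 9300 + 70000 = 79800.
from math import isclose

_poly = (0.0, 0.0, 5.0, 9.3, 7.0)
assert isclose(evaluate_poly(_poly, 10.0), 79800.0)
assert isclose(horner(_poly, 10.0), 79800.0)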
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Error is the difference between the hypothesis value and the actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Computes the hypothesis (predicted) value for a given input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
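# Quick check of the hypothesis function with the *initial* parameter_vector
# [2, 4, 1, 5] (illustrative; holds before run_gradient_descent mutates it):
#
#     _hypothesis_value((5, 2, 3)) == 2 + 4*5 + 1*2 + 5*3 == 39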
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx ( flat_idx : int , dims : Tuple[int, ...] ) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims ):
        idx.append(flat_idx % d )
        flat_idx = flat_idx // d
    return tuple(reversed(idx ) )
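# Worked example for the unravel helper above: with dims = (3, 4), flat index 7
# maps to (1, 3), since 7 % 4 == 3 and 7 // 4 == 1.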
@torch.jit.ignore
def _get_minimal_slice_set ( start : Sequence[int] , end : Sequence[int] , dims : Sequence[int] , start_edges : Optional[Sequence[bool]] = None , end_edges : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(l : List[bool] ) -> None:
    tally = True
    for i in range(len(l ) ):
        reversed_idx = -1 * (i + 1)
        l[reversed_idx] &= tally
        tally = l[reversed_idx]
if start_edges is None:
    start_edges = [s == 0 for s in start]
    reduce_edge_list(start_edges )
if end_edges is None:
    end_edges = [e == (d - 1) for e, d in zip(end , dims )]
    reduce_edge_list(end_edges )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(start ) == 0:
    return [()]
elif len(start ) == 1:
    return [(slice(start[0] , end[0] + 1 ),)]
slices = []
path_list = []
# Dimensions common to start and end can be selected directly
for s, e in zip(start , end ):
    if s == e:
        path_list.append(slice(s , s + 1 ) )
    else:
        break
path = tuple(path_list )
divergence_idx = len(path )
# start == end, and we're done
if divergence_idx == len(start ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
sdi = start[divergence_idx]
return tuple(
path + (slice(sdi , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
edi = end[divergence_idx]
return tuple(
path + (slice(edi , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice ( t : torch.Tensor , flat_start : int , flat_end : int , no_batch_dims : int ) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims ) )
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims ) )
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def chunk_layer ( layer : Callable , inputs : Dict[str, Any] , chunk_size : int , no_batch_dims : int , low_mem : bool = False , _out : Any = None , _add_into_out : bool = False , ) -> Any:
    if not (len(inputs ) > 0):
        raise ValueError('''Must provide at least one input''' )
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs )]
    orig_batch_dims = tuple([max(s ) for s in zip(*initial_dims )] )
    def _prep_inputs(t : torch.Tensor ) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            t = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t
    prepped_inputs = tensor_tree_map(_prep_inputs , inputs )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t : torch.Tensor ) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(no_chunks ):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size ) , no_batch_dims=len(orig_batch_dims ) , )
        chunks = tensor_tree_map(select_chunk , prepped_inputs )
# Run the layer on the chunk
        output_chunk = layer(**chunks )
# Allocate space for the output
if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
# Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict ):
            def assign(da : dict , db : dict ) -> None:
                for k, v in da.items():
                    if isinstance(v , dict ):
                        assign(v , db[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += db[k]
                        else:
                            v[i : i + chunk_size] = db[k]
            assign(out , output_chunk )
        elif isinstance(output_chunk , tuple ):
            for xa, xb in zip(out , output_chunk ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('''Not supported''' )
i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , out )
return out
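# --- Added usage sketch (not part of the original file) ---
# Apply a toy layer over two flattened batch dimensions in chunks of 4; the
# stand-in layer and input names below are ours.
if __name__ == "__main__":
    def toy_layer(x: torch.Tensor) -> torch.Tensor:
        return x * 2  # stand-in for an expensive module
    example_inputs = {"x": torch.randn(8, 16, 32)}  # batch dims: 8 x 16
    result = chunk_layer(toy_layer, example_inputs, chunk_size=4, no_batch_dims=2)
    assert result.shape == (8, 16, 32)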
class ChunkSizeTuner :
'''simple docstring'''
    def __init__( self : List[Any] , max_chunk_size : int = 5_12 , ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None
    def _determine_favorable_chunk_size ( self : List[Any] , fn : Callable , args : tuple , min_chunk_size : int ):
        logging.info('''Tuning chunk size...''' )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size : int ) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size )
                return True
            except RuntimeError:
                return False
        min_viable_chunk_size_index = 0
        i = len(candidates ) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i] )
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates ) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches ( self : int , aca : Iterable , acb : Iterable ):
        consistent = True
        for aa, ab in zip(aca , acb ):
            assert type(aa ) == type(ab )
            if isinstance(aa , (list, tuple) ):
                consistent &= self._compare_arg_caches(aa , ab )
            elif isinstance(aa , dict ):
                aa_items = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
                ab_items = [v for _, v in sorted(ab.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(aa_items , ab_items )
            else:
                consistent &= aa == ab
return consistent
    def tune_chunk_size ( self : str , representative_fn : Callable , args : tuple , min_chunk_size : int , ):
        consistent = True
        arg_data = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , args , object )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(arg_data )
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data )
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
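# --- Added usage sketch (not part of the original file) ---
# How the tuner above is typically driven; `layer_fn` and `inputs` are
# placeholders for a real module and its prepared inputs:
#
#   from functools import partial
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(
#       partial(chunk_layer, layer_fn, no_batch_dims=2),
#       (inputs,),
#       min_chunk_size=1,
#   )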
| 177
| 0
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption (parser : Dict ) -> Tuple:
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary (terminalreporter : List[str] ) -> Union[str, Any]:
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 113
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=18 , lowerCAmelCase__=30 , lowerCAmelCase__=400 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> int:
SCREAMING_SNAKE_CASE = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
def __A ( self ) -> Optional[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCAmelCase ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ImageGPTImageProcessor if is_vision_available() else None
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = ImageGPTImageProcessingTester(self )
@property
def __A ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , 'clusters' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_normalize' ) )
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __A ( self ) -> List[Any]:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , 'image_processor.json' )
image_processor_first.to_json_file(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_json_file(lowerCAmelCase__ ).to_dict()
SCREAMING_SNAKE_CASE = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase__ )
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_pretrained(lowerCAmelCase__ ).to_dict()
SCREAMING_SNAKE_CASE = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase__ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def __A ( self ) -> Optional[Any]:
pass
def lowercase () -> Union[str, Any]:
SCREAMING_SNAKE_CASE = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
SCREAMING_SNAKE_CASE = Image.open(dataset[4]['file'] )
SCREAMING_SNAKE_CASE = Image.open(dataset[5]['file'] )
SCREAMING_SNAKE_CASE = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
SCREAMING_SNAKE_CASE = prepare_images()
# test non-batched
SCREAMING_SNAKE_CASE = image_processing(images[0] , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
SCREAMING_SNAKE_CASE = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase__ )
# test batched
SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
SCREAMING_SNAKE_CASE = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase__ )
| 113
| 1
|
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
A: int = HUGGINGFACE_HUB_CACHE
A: Optional[Any] = "config.json"
A: Union[str, Any] = "diffusion_pytorch_model.bin"
A: List[str] = "diffusion_flax_model.msgpack"
A: List[Any] = "model.onnx"
A: int = "diffusion_pytorch_model.safetensors"
A: List[str] = "weights.pb"
A: List[str] = "https://huggingface.co"
A: str = default_cache_path
A: List[str] = "diffusers_modules"
A: Any = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
A: Dict = ["fp16", "non-ema"]
A: Any = ".self_attn"
| 368
|
"""simple docstring"""
import base64
def base85_encode ( string : str ) -> bytes:
    return base64.a85encode(string.encode("""utf-8""" ) )
def base85_decode ( a85encoded : bytes ) -> str:
    return base64.a85decode(a85encoded ).decode("""utf-8""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
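# Added usage check for the Ascii85 helpers above (round-trip only, so no
# encoded literal needs to be assumed):
if __name__ == "__main__":
    encoded = base85_encode("Hello World!")
    assert base85_decode(encoded) == "Hello World!"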
| 76
| 0
|
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data ( subreddit : str , limit : int = 1 , age : str = "new" , wanted_data : list | None = None ) -> dict:
    """simple docstring"""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = f'Invalid search term: {invalid_search_terms}'
        raise ValueError(msg )
    response = requests.get(
        f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , )
    if response.status_code == 4_2_9:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 18
|
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict ( TypedDict ):
    '''simple docstring'''
    bwt_string : str
    idx_original_string : int
def all_rotations ( s : str ) -> list[str]:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform ( s : str ) -> BWTTransformDict:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )
    rotations : Union[str, Any] = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response : BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt ( bwt_string : str , idx_original_string : int ) -> str:
    if not isinstance(bwt_string , str ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
    ordered_rotations = [""""""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
a__ = """Provide a string that I will generate its BWT transform: """
a__ = input(entry_msg).strip()
a__ = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
a__ = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
)
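# Added worked example (module-level names here are ours):
# For "^BANANA" the sorted rotations are
#   ANANA^B, ANA^BAN, A^BANAN, BANANA^, NANA^BA, NA^BANA, ^BANANA
# so the BWT (last column) is "BNN^AAA" and the original string sits at index 6.
demo = bwt_transform("^BANANA")
assert demo == {"bwt_string": "BNN^AAA", "idx_original_string": 6}
assert reverse_bwt(demo["bwt_string"], demo["idx_original_string"]) == "^BANANA"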
| 317
| 0
|
import argparse
import os
import re
__A = '''src/transformers'''
# Pattern that looks at the indentation in a line.
__A = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
__A = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__A = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
__A = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__A = re.compile(r'''\[([^\]]+)\]''')
def __a ( lowerCAmelCase_ : Optional[Any] ) -> str:
'''simple docstring'''
    search = _re_indent.search(lowerCAmelCase_ )
return "" if search is None else search.groups()[0]
def __a ( lowerCAmelCase_ : Dict ,lowerCAmelCase_ : Optional[Any]="" ,lowerCAmelCase_ : Union[str, Any]=None ,lowerCAmelCase_ : Optional[int]=None ) -> Dict:
'''simple docstring'''
UpperCAmelCase_= 0
UpperCAmelCase_= code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(lowerCAmelCase_ ):
index += 1
UpperCAmelCase_= ["""\n""".join(lines[:index] )]
else:
UpperCAmelCase_= []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCAmelCase_= [lines[index]]
index += 1
while index < len(lowerCAmelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCAmelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(lowerCAmelCase_ ) )
if index < len(lowerCAmelCase_ ) - 1:
UpperCAmelCase_= [lines[index + 1]]
index += 1
else:
UpperCAmelCase_= []
else:
blocks.append("""\n""".join(lowerCAmelCase_ ) )
UpperCAmelCase_= [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCAmelCase_ ) > 0:
blocks.append("""\n""".join(lowerCAmelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCAmelCase_ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def __a ( key : Optional[int] ) -> Dict:
    '''simple docstring'''
    def _inner(x : Optional[Any] ):
        return key(x ).lower().replace("""_""" ,"""""" )
    return _inner
def __a ( lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : Any=None ) -> str:
'''simple docstring'''
    def noop(x : Union[str, Any] ):
        return x
if key is None:
UpperCAmelCase_= noop
# Constants are all uppercase, they go first.
UpperCAmelCase_= [obj for obj in objects if key(lowerCAmelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
UpperCAmelCase_= [obj for obj in objects if key(lowerCAmelCase_ )[0].isupper() and not key(lowerCAmelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
UpperCAmelCase_= [obj for obj in objects if not key(lowerCAmelCase_ )[0].isupper()]
UpperCAmelCase_= ignore_underscore(lowerCAmelCase_ )
return sorted(lowerCAmelCase_ ,key=lowerCAmelCase_ ) + sorted(lowerCAmelCase_ ,key=lowerCAmelCase_ ) + sorted(lowerCAmelCase_ ,key=lowerCAmelCase_ )
def __a ( lowerCAmelCase_ : Tuple ) -> str:
'''simple docstring'''
    def _replace(match : Any ):
        imports = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
UpperCAmelCase_= [part.strip().replace("""\"""" ,"""""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase_= keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCAmelCase_ )] ) + "]"
UpperCAmelCase_= import_statement.split("""\n""" )
if len(lowerCAmelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCAmelCase_= 2 if lines[1].strip() == """[""" else 1
        internal_keys = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(internal_keys ,key=lambda x : x[1] )
UpperCAmelCase_= [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCAmelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCAmelCase_= _re_bracket_content.sub(_replace ,lines[1] )
else:
UpperCAmelCase_= [part.strip().replace("""\"""" ,"""""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase_= keys[:-1]
UpperCAmelCase_= get_indent(lines[1] ) + """, """.join([F"""\"{k}\"""" for k in sort_objects(lowerCAmelCase_ )] )
return "\n".join(lowerCAmelCase_ )
else:
# Finally we have to deal with imports fitting on one line
UpperCAmelCase_= _re_bracket_content.sub(_replace ,lowerCAmelCase_ )
return import_statement
def __a ( lowerCAmelCase_ : Dict ,lowerCAmelCase_ : Optional[int]=True ) -> Optional[Any]:
'''simple docstring'''
with open(lowerCAmelCase_ ,encoding="""utf-8""" ) as f:
UpperCAmelCase_= f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCAmelCase_= split_code_in_indented_blocks(
lowerCAmelCase_ ,start_prompt="""_import_structure = {""" ,end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 ,len(lowerCAmelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCAmelCase_= main_blocks[block_idx]
UpperCAmelCase_= block.split("""\n""" )
# Get to the start of the imports.
UpperCAmelCase_= 0
while line_idx < len(lowerCAmelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCAmelCase_= len(lowerCAmelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCAmelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCAmelCase_= """\n""".join(block_lines[line_idx:-1] )
UpperCAmelCase_= get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
UpperCAmelCase_= split_code_in_indented_blocks(lowerCAmelCase_ ,indent_level=lowerCAmelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCAmelCase_= _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCAmelCase_= [(i, key) for i, key in enumerate(lowerCAmelCase_ ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort ,key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
UpperCAmelCase_= 0
UpperCAmelCase_= []
for i in range(len(lowerCAmelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
UpperCAmelCase_= sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCAmelCase_ )
count += 1
# And we put our main block back together with its first and last line.
UpperCAmelCase_= """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCAmelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCAmelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write("""\n""".join(lowerCAmelCase_ ) )
def __a ( lowerCAmelCase_ : str=True ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_= []
for root, _, files in os.walk(lowerCAmelCase_ ):
if "__init__.py" in files:
UpperCAmelCase_= sort_imports(os.path.join(lowerCAmelCase_ ,"""__init__.py""" ) ,check_only=lowerCAmelCase_ )
if result:
UpperCAmelCase_= [os.path.join(lowerCAmelCase_ ,"""__init__.py""" )]
if len(lowerCAmelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCAmelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__A = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
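# --- Added illustration (not part of the original script) ---
# The ordering rule implemented by the sort helper above (named `sort_objects`
# in the original transformers script): constants first, then classes, then
# functions, each group alphabetized while ignoring underscores. E.g.:
#
#   sort_objects(["load_model", "MODEL_LIST", "AutoModel", "CONFIG", "build"])
#   -> ["CONFIG", "MODEL_LIST", "AutoModel", "build", "load_model"]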
| 277
|
def __a ( index : Dict ) -> Dict:
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges ( graph : dict[int, list[int]] ) -> list[tuple[int, int]]:
    '''simple docstring'''
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at : Optional[int] ,parent : Dict ,bridges : Any ,id_ : int ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to ,at ,bridges ,id_ )
                low[at] = min(low[at] ,low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] ,low[to] )
    bridges = []
    for i in range(n ):
        if not visited[i]:
            dfs(i ,-1 ,bridges ,id_ )
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
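# Added example run (names are ours): on the path graph 0 - 1 - 2 every edge is
# a bridge; the DFS appends (1, 2) first, then (0, 1) while unwinding.
path_graph = {0: [1], 1: [0, 2], 2: [1]}
assert compute_bridges(path_graph) == [(1, 2), (0, 1)]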
| 277
| 1
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase : str =logging.get_logger(__name__)
@add_end_docstrings(a_ )
class _lowercase (a_ ):
'''simple docstring'''
def __init__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
super().__init__(*snake_case__ , **snake_case__ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _lowerCamelCase ( self , snake_case__=None , snake_case__=None , snake_case__=None ):
'''simple docstring'''
UpperCamelCase_ = {}
UpperCamelCase_ = {}
if prompt is not None:
UpperCamelCase_ = prompt
if generate_kwargs is not None:
UpperCamelCase_ = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
UpperCamelCase_ = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one" )
UpperCamelCase_ = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
return super().__call__(snake_case__ , **snake_case__ )
def _lowerCamelCase ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
UpperCamelCase_ = load_image(snake_case__ )
if prompt is not None:
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError(
F"""Received an invalid text input, got - {type(snake_case__ )} - but expected a single string. """
"Note also that one single text can be provided for conditional image to text generation." )
UpperCamelCase_ = self.model.config.model_type
if model_type == "git":
UpperCamelCase_ = self.image_processor(images=snake_case__ , return_tensors=self.framework )
UpperCamelCase_ = self.tokenizer(text=snake_case__ , add_special_tokens=snake_case__ ).input_ids
UpperCamelCase_ = [self.tokenizer.cls_token_id] + input_ids
UpperCamelCase_ = torch.tensor(snake_case__ ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
UpperCamelCase_ = self.image_processor(images=snake_case__ , header_text=snake_case__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
UpperCamelCase_ = self.image_processor(images=snake_case__ , return_tensors=self.framework )
UpperCamelCase_ = self.tokenizer(snake_case__ , return_tensors=self.framework )
model_inputs.update(snake_case__ )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
UpperCamelCase_ = self.image_processor(images=snake_case__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
UpperCamelCase_ = None
return model_inputs
def _lowerCamelCase ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , snake_case__ )
and all(x is None for x in model_inputs["input_ids"] )
):
UpperCamelCase_ = None
if generate_kwargs is None:
UpperCamelCase_ = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
UpperCamelCase_ = model_inputs.pop(self.model.main_input_name )
UpperCamelCase_ = self.model.generate(snake_case__ , **snake_case__ , **snake_case__ )
return model_outputs
def _lowerCamelCase ( self , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = []
for output_ids in model_outputs:
UpperCamelCase_ = {
"generated_text": self.tokenizer.decode(
snake_case__ , skip_special_tokens=snake_case__ , )
}
records.append(snake_case__ )
return records
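# --- Added usage sketch (not part of the original file) ---
# This pipeline is normally built via the public factory; the checkpoint below
# is just one example, and the exact caption text will vary by model:
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("path/or/url/to/image.png", max_new_tokens=20)
#   # -> [{'generated_text': '...a short caption...'}]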
| 128
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Union[str, Any] =logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] ={
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = """vit_msn"""
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-06 , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = initializer_range
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = qkv_bias
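# --- Added usage sketch (not part of the original file) ---
# Typical lifecycle of this config class, mirroring other HF configs:
#
#   from transformers import ViTMSNConfig, ViTMSNModel
#   config = ViTMSNConfig(image_size=224, patch_size=16)
#   model = ViTMSNModel(config)           # randomly initialized weights
#   config.save_pretrained("./vit-msn")   # round-trips via config.json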
| 128
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
__UpperCAmelCase = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
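# --- Added illustration (not part of the original file) ---
# A minimal sketch of the lazy-module idea used above (ours, much simplified
# from the real transformers._LazyModule): submodules are imported only on
# first attribute access.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                # Import the submodule lazily, then forward the attribute.
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")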
| 360
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : str =DebertaTokenizer
lowerCamelCase : Dict =True
lowerCamelCase : List[str] =DebertaTokenizerFast
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
__lowerCAmelCase : Optional[int] = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
__lowerCAmelCase : List[str] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowerCAmelCase : Any = {"""unk_token""": """[UNK]"""}
__lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = """lower newer"""
__lowerCAmelCase : Union[str, Any] = """lower newer"""
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.get_tokenizer()
__lowerCAmelCase : int = """lower newer"""
__lowerCAmelCase : Any = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
__lowerCAmelCase : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.get_tokenizer()
__lowerCAmelCase : Any = tokenizer("""Hello""" , """World""" )
__lowerCAmelCase : Tuple = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__lowerCAmelCase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase )
__lowerCAmelCase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase )
__lowerCAmelCase : Dict = tokenizer.encode(
"""sequence builders""" , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
__lowerCAmelCase : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase )
__lowerCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowerCAmelCase : int = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__lowerCAmelCase : Optional[Any] = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
__lowerCAmelCase : List[str] = tokenizer(lowerCAmelCase , padding=lowerCAmelCase )
__lowerCAmelCase : List[str] = [tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) for seq in encoding["""input_ids"""]]
# fmt: off
__lowerCAmelCase : Any = {
"""input_ids""": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowerCAmelCase : int = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , lowerCAmelCase )
for expected, decoded in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 139
| 0
|
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(data_input_tuple ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.00_00_02
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j) )
def test_gradient_descent():
    for i in range(len(test_data ) ):
        print(("Actual output value:", output(i , "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(i , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 63
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 255 , _UpperCAmelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowercase__: str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
lowercase__: Optional[Any] = parent
lowercase__: List[Any] = batch_size
lowercase__: Tuple = num_channels
lowercase__: Optional[Any] = min_resolution
lowercase__: Dict = max_resolution
lowercase__: Optional[int] = do_resize
lowercase__: Any = size
lowercase__: Optional[Any] = do_normalize
lowercase__: Union[str, Any] = image_mean
lowercase__: Tuple = image_std
lowercase__: str = do_rescale
lowercase__: Any = rescale_factor
lowercase__: List[Any] = do_pad
def _snake_case ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
if not batched:
lowercase__: Optional[Any] = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image ):
lowercase__, lowercase__: Dict = image.size
else:
lowercase__, lowercase__: Optional[Any] = image.shape[1], image.shape[2]
if w < h:
lowercase__: List[str] = int(self.size['''shortest_edge'''] * h / w )
lowercase__: Union[str, Any] = self.size['''shortest_edge''']
elif w > h:
lowercase__: int = self.size['''shortest_edge''']
lowercase__: int = int(self.size['''shortest_edge'''] * w / h )
else:
lowercase__: Union[str, Any] = self.size['''shortest_edge''']
lowercase__: Union[str, Any] = self.size['''shortest_edge''']
else:
lowercase__: Optional[int] = []
for image in image_inputs:
lowercase__, lowercase__: int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = YolosImageProcessor if is_vision_available() else None
def _snake_case ( self ):
lowercase__: int = YolosImageProcessingTester(self )
@property
def _snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ):
lowercase__: List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
def _snake_case ( self ):
lowercase__: Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
lowercase__: Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# Initialize image_processing
lowercase__: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
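
# --- Illustrative usage sketch (not part of the original test file) ---
# A minimal sketch of the API the slow tests above exercise. It relies on the
# imports at the top of this test file and assumes the COCO fixture files exist
# locally and the "hustvl/yolos-small" checkpoint is reachable.
if __name__ == "__main__":
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
        annotations = json.loads(f.read())
    processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
    # Pixel values are resized/normalized; annotations become DETR-style label dicts.
    encoding = processor(
        images=image, annotations={"image_id": 39769, "annotations": annotations}, return_tensors="pt"
    )
    print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066])
    print(sorted(encoding["labels"][0].keys()))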
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False

        while not completed:
            if counter == 1:
                self.reset()

            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class PhrasalConstraint(Constraint):
    """A `Constraint` enforcing that an ordered sequence of tokens appears in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint


class DisjunctiveTrie:
    """A prefix trie over several candidate token sequences."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        # If one sequence were a prefix of another, the leaf count would be
        # smaller than the number of sequences.
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    """A `Constraint` fulfilled once *one* of several candidate sequences appears in the output."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint


class ConstraintListState:
    """A helper class that keeps track of progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of
            # our list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
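
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how these classes fit together: a ConstraintListState
# tracks progress through one phrasal and one disjunctive constraint as token
# ids arrive. The token ids below are arbitrary values chosen for the demo.
if __name__ == "__main__":
    constraints = [
        PhrasalConstraint([5, 6, 7]),  # the exact sequence 5 -> 6 -> 7
        DisjunctiveConstraint([[1, 2], [1, 3]]),  # either 1 -> 2 or 1 -> 3
    ]
    state = ConstraintListState(constraints)
    for token_id in [5, 6, 7, 1, 3]:
        complete, stepped = state.add(token_id)
        print(f"token {token_id}: bank={state.get_bank()}, all completed={state.completed}")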
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
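
# --- Illustrative usage sketch (not part of the original test file) ---
# The integration test above follows the usual two-stage Kandinsky 2.2 flow:
# the prior turns a text prompt into image embeddings, and the controlnet
# pipeline decodes them conditioned on a hint tensor (e.g. a depth map). The
# random hint below is a stand-in for a real depth map; a CUDA device and the
# hub checkpoints are assumed reachable.
if __name__ == "__main__":
    prior = KandinskyV22PriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
    ).to("cuda")
    image_emb, negative_emb = prior("A robot, 4k photo", num_inference_steps=5, negative_prompt="").to_tuple()
    hint = torch.rand(1, 3, 512, 512, dtype=torch.float16, device="cuda")  # stand-in for a real depth hint
    image = pipe(
        image_embeds=image_emb, negative_image_embeds=negative_emb, hint=hint, output_type="np"
    ).images[0]
    print(image.shape)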
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple ) -> List[Any]:
_validate_point(_a )
_validate_point(_a )
if len(_a ) != len(_a ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(a - b ) for a, b in zip(_a , _a ) ) )
def lowercase ( lowerCAmelCase__ : Tuple ) -> str:
if point:
if isinstance(_a , _a ):
for item in point:
if not isinstance(_a , (int, float) ):
__a = (
"Expected a list of numbers as input, found "
f'''{type(_a ).__name__}'''
)
raise TypeError(_a )
else:
__a = f'''Expected a list of numbers as input, found {type(_a ).__name__}'''
raise TypeError(_a )
else:
raise ValueError('''Missing an input''' )
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
_validate_point(_a )
_validate_point(_a )
if len(_a ) != len(_a ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(x - y ) for x, y in zip(_a , _a ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
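
# --- Illustrative usage (not part of the original module; the inputs are our own) ---
if __name__ == "__main__":
    print(manhattan_distance([1, 1], [2, 2]))  # |1-2| + |1-2| = 2.0
    print(manhattan_distance_one_liner([1, 3], [4, 7]))  # |1-4| + |3-7| = 7.0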
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
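
# --- Illustrative sketch (not part of the original script) ---
# The two date windows above can be checked in isolation; `_is_stale` is a
# hypothetical extraction of that predicate, added here only for clarity.
def _is_stale(now: dt, created_at: dt, updated_at: dt, inactive_days: int) -> bool:
    # An issue qualifies once it is at least 30 days old and has been
    # inactive for longer than the given window (7 or 23 days above).
    return (now - updated_at).days > inactive_days and (now - created_at).days >= 30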
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
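
# --- Illustrative usage sketch (not part of the original test file) ---
# The expected ids come straight from test_prepare_batch above; the checkpoint
# is assumed reachable on the Hub.
if __name__ == "__main__":
    tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
    print(tok("A long paragraph for summarization.").input_ids)  # [0, 57, 3018, 70307, 91, 2]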
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
import json
import os
import unittest

from transformers.models.roc_bert.tokenization_roc_bert import (
    VOCAB_FILES_NAMES,
    RoCBertBasicTokenizer,
    RoCBertTokenizer,
    RoCBertWordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
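
# --- Illustrative usage sketch (not part of the original test file) ---
# RoCBert pairs every token with a shape id and a pronunciation id, which is
# what the three convert_tokens_to_* calls above exercise. The checkpoint name
# below is the public RoCBert checkpoint and is assumed reachable.
if __name__ == "__main__":
    tokenizer = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
    tokens = tokenizer.tokenize("你好")
    print(tokens)
    print(tokenizer.convert_tokens_to_ids(tokens))
    print(tokenizer.convert_tokens_to_shape_ids(tokens))
    print(tokenizer.convert_tokens_to_pronunciation_ids(tokens))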
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque so repeated inserts to the same key accumulate.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
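
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch assuming the base HashTable exposes its usual
# (size_table, charge_factor, ...) constructor and an insert_data() method.
if __name__ == "__main__":
    table = HashTableWithLinkedList(size_table=3, charge_factor=2)
    for value in (10, 20, 30):
        table.insert_data(value)
    print(table.values)  # slots are deques, so values with the same key coexist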
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
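
# --- Illustrative usage sketch (not part of the original module) ---
# Only shows how defaults can be overridden; the values here are our own and
# the relative imports above mean this file runs as part of the package.
#
#     config = LukeConfig(entity_vocab_size=10000, entity_emb_size=128)
#     print(config.hidden_size, config.entity_emb_size, config.use_entity_aware_attention)
#     # 768 128 True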
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` between the energy units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
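
# --- Illustrative usage (not part of the original module; the inputs are our own) ---
# 1 kilowatthour is 3_600_000 J and 1 kilojoule is 1_000 J, hence the 3600.0 below.
if __name__ == "__main__":
    print(energy_conversion("kilowatthour", "kilojoule", 1))  # 3600.0
    print(energy_conversion("joule", "joule", 12.5))  # 12.5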
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    # Test all the custom files are present in the given path.
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
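
# --- Illustrative invocations (not part of the original script; the file name is assumed) ---
# Check an installed package:   python check_build.py --check_lib
# Check a fresh sdist build:    python check_build.py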
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ = logging.get_logger(__name__)
class _snake_case ( _a ):
_A : Optional[int] = ['''pixel_values''']
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : int = 0.9 ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 255 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,**SCREAMING_SNAKE_CASE__ : str ,):
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = size if size is not None else {"shortest_edge": 224}
SCREAMING_SNAKE_CASE:Dict = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE:List[str] = get_size_dict(SCREAMING_SNAKE_CASE__ ,param_name="crop_size" )
SCREAMING_SNAKE_CASE:Optional[Any] = do_resize
SCREAMING_SNAKE_CASE:List[Any] = size
SCREAMING_SNAKE_CASE:Tuple = crop_pct
SCREAMING_SNAKE_CASE:Tuple = resample
SCREAMING_SNAKE_CASE:List[str] = do_center_crop
SCREAMING_SNAKE_CASE:Union[str, Any] = crop_size
SCREAMING_SNAKE_CASE:Dict = do_rescale
SCREAMING_SNAKE_CASE:int = rescale_factor
SCREAMING_SNAKE_CASE:Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE:Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE:Tuple = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : Optional[float] = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : List[str] ,):
SCREAMING_SNAKE_CASE:Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
if crop_pct is not None:
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE:Any = int(size["shortest_edge"] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
SCREAMING_SNAKE_CASE:List[str] = int(size["height"] / crop_pct )
else:
SCREAMING_SNAKE_CASE:int = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
else:
raise ValueError("Invalid size for resize: {}".format(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE:Tuple = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__ )
else:
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE:Optional[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ ,size=size["shortest_edge"] ,default_to_square=SCREAMING_SNAKE_CASE__ )
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE:str = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(SCREAMING_SNAKE_CASE__ ) )
return resize(SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,resample=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : int ,):
SCREAMING_SNAKE_CASE:Any = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(SCREAMING_SNAKE_CASE__ ,size=(size["height"], size["width"]) ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[int, float] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
return rescale(SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : List[Any] ,):
return normalize(SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: int = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
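
# Hedged illustration (added; not part of the original file): the crop_pct branch
# of `resize` scales the target size up first, so a later center crop of `size`
# keeps roughly `crop_pct` of the image content. With assumed values:
#
#     size = {"shortest_edge": 224}; crop_pct = 0.9
#     int(size["shortest_edge"] / crop_pct)  # -> 248, the resize target before the 224x224 crop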
| 139
| 0
|
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1], [0.0_02, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(thresholding=True, prediction_type=prediction_type, sample_max_value=threshold)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_09_79)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 11_53.18_33) < 1e-2
        assert abs(result_mean.item() - 0.50_05) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 2_58.96_06) < 1e-2
        assert abs(result_mean.item() - 0.33_72) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 2_02.02_96) < 1e-2
        assert abs(result_mean.item() - 0.26_31) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
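
# Hedged usage sketch (added for illustration; not part of the test suite). The
# denoising loop these tests exercise looks like this, with a random tensor
# standing in for a real model's noise prediction; the shapes are assumptions:
#
#     scheduler = DDPMParallelScheduler(num_train_timesteps=1_000)
#     scheduler.set_timesteps(num_inference_steps=10)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         residual = torch.randn(1, 3, 8, 8)  # stand-in for model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample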
| 41
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow logging noise
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 41
| 1
|
"""simple docstring"""
def solution(n: int = 400_0000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F"""{solution() = }""")
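
# Hedged alternative sketch (added for illustration). Every third Fibonacci
# number is even, and the even terms obey E(k) = 4 * E(k - 1) + E(k - 2), so the
# sum can also be computed without storing the whole sequence:
def solution_constant_memory(n: int = 400_0000) -> int:
    total, a, b = 0, 2, 8  # 2 and 8 are the first two even Fibonacci numbers
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total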
| 100
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer built from `tokenizers` components."""

    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>"):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)
    def train(self, files: Union[str, List[str]], vocab_size: int = 80_00, show_progress: bool = True):
        """Train the model on the given files."""
        trainer = trainers.UnigramTrainer(vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 80_00, show_progress: bool = True):
        """Train the model on the given iterator."""
        trainer = trainers.UnigramTrainer(vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
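
# Hedged usage sketch (added for illustration; `corpus.txt` is a hypothetical file):
if __name__ == "__main__":
    tokenizer = SentencePieceUnigramTokenizer()
    tokenizer.train("corpus.txt", vocab_size=8_000)
    print(tokenizer.encode("Hello world").tokens)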
| 78
| 0
|
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Compute the intensity of polarized light transmitted through an analyzer."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
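
# Hedged usage sketch (added for illustration): at 60 degrees an ideal analyzer
# passes cos^2(60°) = 25% of the incident light.
if __name__ == "__main__":
    print(malus_law(100.0, 60.0))  # ~25.0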
| 369
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 303
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 337
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1E-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    def generate_dummy_inputs(self, preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.")
| 337
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 83
|
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! using a memoized recursion."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
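
# Hedged usage sketch (added for illustration): thanks to @lru_cache, a second
# call reuses the memoized recursion results.
if __name__ == "__main__":
    print(factorial(5))  # 120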
| 83
| 1
|
from cv2 import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    neg = convert_to_negative(img)
    # show result image
    imshow("negative of original image", neg)
waitKey(0)
destroyAllWindows()
| 52
|
def binary_recursive(decimal: int) -> str:
    """Recursively compute the binary digits of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    decimal, remainder = divmod(decimal, 2)
    return binary_recursive(decimal) + str(remainder)


def main(number: str) -> str:
    """Convert an integer string (possibly negative) to its binary representation."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
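
# Hedged usage sketch (added for illustration):
if __name__ == "__main__":
    print(main("200"))  # 0b11001000
    print(main("-19"))  # -0b10011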
| 52
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
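
# Hedged usage sketch (illustration only; in user code the class is imported as
# `from transformers import DPRConfig`):
#
#     config = DPRConfig(projection_dim=128)
#     (config.hidden_size, config.projection_dim)  # -> (768, 128)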
| 358
|
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """Build a MaskFormerConfig with a Swin backbone for the given checkpoint name."""
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"])
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    """List (old, new) key pairs mapping the original checkpoint to the HF model."""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split the Swin fused qkv projections into separate query/key/value tensors."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(config, state_dict):
    """Split the decoder's fused in_proj matrices into separate q/k/v projections."""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    """Download a COCO test image for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original MaskFormer checkpoint to the HF format and verify its outputs."""
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(config, state_dict)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 6_5535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 98
| 0
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True,
    )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 41
|
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
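
# Hedged usage sketch (added for illustration; the grid is an assumption).
# 1 marks walkable cells; the only route from (0, 0) to (2, 0) detours through
# the right-hand column, so the expected distance is 6.
if __name__ == "__main__":
    grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    dist, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
    print(dist, path)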
| 41
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the command line options for the TPU launch helper."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 260
|
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
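
# Hedged usage sketch (added for illustration): 7 * 15 % 26 == 1, so 15 is the
# modular inverse of 7 modulo 26.
if __name__ == "__main__":
    print(find_mod_inverse(7, 26))  # 15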
| 260
| 1
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 100
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    """Pipeline for class-conditional image generation with a Transformer backbone."""

    def __init__(self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().lower()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
        return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Dict , _A : List[int] , _A : float = 4.0 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : int = 50 , _A : Optional[str] = "pil" , _A : bool = True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = len(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.transformer.config.sample_size
__SCREAMING_SNAKE_CASE : List[Any] = self.transformer.config.in_channels
__SCREAMING_SNAKE_CASE : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
__SCREAMING_SNAKE_CASE : Tuple = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(_A , device=self.device ).reshape(-1 )
__SCREAMING_SNAKE_CASE : Any = torch.tensor([1000] * batch_size , device=self.device )
__SCREAMING_SNAKE_CASE : Any = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_A )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE : Optional[Any] = latent_model_input[: len(_A ) // 2]
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat([half, half] , dim=0 )
__SCREAMING_SNAKE_CASE : int = self.scheduler.scale_model_input(_A , _A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = t
if not torch.is_tensor(_A ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__SCREAMING_SNAKE_CASE : Any = latent_model_input.device.type == '''mps'''
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = torch.floataa if is_mps else torch.floataa
else:
__SCREAMING_SNAKE_CASE : int = torch.intaa if is_mps else torch.intaa
__SCREAMING_SNAKE_CASE : int = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__SCREAMING_SNAKE_CASE : Optional[int] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.transformer(
_A , timestep=_A , class_labels=_A ).sample
# perform guidance
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = torch.split(_A , len(_A ) // 2 , dim=0 )
__SCREAMING_SNAKE_CASE : str = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__SCREAMING_SNAKE_CASE : List[Any] = torch.cat([half_eps, half_eps] , dim=0 )
__SCREAMING_SNAKE_CASE : List[str] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = torch.split(_A , _A , dim=1 )
else:
__SCREAMING_SNAKE_CASE : List[Any] = noise_pred
# compute previous image: x_t -> x_t-1
__SCREAMING_SNAKE_CASE : str = self.scheduler.step(_A , _A , _A ).prev_sample
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = latent_model_input.chunk(2 , dim=0 )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = latent_model_input
__SCREAMING_SNAKE_CASE : List[Any] = 1 / self.vae.config.scaling_factor * latents
__SCREAMING_SNAKE_CASE : List[str] = self.vae.decode(_A ).sample
__SCREAMING_SNAKE_CASE : Any = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE : int = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE : str = self.numpy_to_pil(_A )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_A )
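
# Minimal usage sketch for the pipeline above (assumes the pretrained
# "facebook/DiT-XL-2-256" checkpoint and a CUDA device are available; adjust
# both to your setup):
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#   pipe = pipe.to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]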
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over one of the RFC 3526 MODP groups."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
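
# Example two-party exchange using the class above (a quick sketch; both
# parties must agree on the same group):
#
#   alice = DiffieHellman(group=14)
#   bob = DiffieHellman(group=14)
#   alice_shared = alice.generate_shared_key(bob.generate_public_key())
#   bob_shared = bob.generate_shared_key(alice.generate_public_key())
#   assert alice_shared == bob_shared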
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
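
# Hypothetical export sketch using the ONNX config above (model name is a
# placeholder; the transformers.onnx-style export flow is assumed):
#
#   from transformers import GPTJConfig
#
#   config = GPTJConfig.from_pretrained("EleutherAI/gpt-j-6B")
#   onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
#   print(onnx_config.inputs)  # OrderedDict with input_ids / past_key_values / attention_mask axes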
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # The input is already a dict (or an iterable of dicts) with "image" and "question" keys
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
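
# Minimal usage sketch (assumes a VQA checkpoint such as
# "dandelin/vilt-b32-finetuned-vqa" is available; swap in any compatible model
# and a real image path):
#
#   from transformers import pipeline
#
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="path/to/image.jpg", question="What is on the table?", top_k=2)
#   # -> [{"score": ..., "answer": ...}, ...]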
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class AccelerateTpuTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Element-wise rectified linear unit: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
"""simple docstring"""
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
    def test_timesteps(self):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
"""simple docstring"""
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """
    Liouville lambda function: 1 if `number` has an even number of prime
    factors (counted with multiplicity), -1 otherwise.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
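
# Example values (counting prime factors with multiplicity):
# liouville_lambda(10) == 1   # 10 = 2 * 5, two factors
# liouville_lambda(11) == -1  # 11 is prime, one factor
# liouville_lambda(12) == -1  # 12 = 2 * 2 * 3, three factors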
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: str, **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: str, output_path: str) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: str, magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: str, magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: str, **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: str, magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: str, magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: str, return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: str) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: str,
        output_path: str,
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
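
# Usage sketch (paths are placeholders): infer the archive format, then extract.
#
#   manager = ExtractManager(cache_dir="/tmp/extract_cache")
#   extracted_path = manager.extract("/path/to/archive.tar.gz")
#
# or, with the lower-level API:
#
#   fmt = Extractor.infer_extractor_format("/path/to/archive.zip")
#   Extractor.extract("/path/to/archive.zip", "/tmp/out", extractor_format=fmt)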
"""simple docstring"""
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
"""simple docstring"""
def gnome_sort(lst: list) -> list:
    """Sort a list in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
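
# Non-interactive example: gnome_sort([3, 1, 2]) returns [1, 2, 3].
# Like insertion sort, gnome sort runs in O(n^2) time in the worst case.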
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name: str) -> str:
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    # Note: the target key names below follow the standard Hugging Face ViT
    # naming scheme; they were reconstructed from the split sizes in the
    # original (garbled) source.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
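
# Example invocation (the script filename is illustrative and the dump folder
# path is a placeholder; the default checkpoint URL points at the base model):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base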
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 42_6880 * Decimal(1_0005).sqrt()
    exponential_term = 1
    linear_term = 1359_1409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 5_4514_0134
        exponential_term *= -26_2537_4126_4076_8000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 239
|
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
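# Convergence sketch (illustrative; the random initial weight makes the result
# stochastic, so only a range around the target is meaningful):
#
#   >>> res = forward_propagation(32, 450_000)  # doctest: +SKIP
#   >>> 31 < res < 33                           # doctest: +SKIP
#   True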
| 239
| 1
|
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 334
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
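# Other gates compose naturally on top of or_gate; a minimal sketch (nor_gate is an
# illustrative addition, not part of the original module):
#
#   def nor_gate(input_1: int, input_2: int) -> int:
#       """NOR is the negation of OR: 1 only when both inputs are 0."""
#       return int(not or_gate(input_1, input_2))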
| 334
| 1
|
'''simple docstring'''
def counting_sort(collection):
    """Pure implementation of counting sort algorithm in Python."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
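# Complexity note with a small worked example (example values assumed, not from the
# original file): counting sort runs in O(n + k) time and space, where n is the input
# length and k = max - min + 1, so it shines when the value range is narrow.
#
#   >>> counting_sort([4, 2, 2, 8, 3, 3, 1])
#   [1, 2, 2, 3, 3, 4, 8]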
| 217
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 217
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 75
|
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
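# A minimal usage sketch (checkpoint name assumed; any masked-LM checkpoint works):
#
#   from transformers import pipeline
#
#   fill_mask = pipeline("fill-mask", model="bert-base-uncased")
#   fill_mask("Paris is the [MASK] of France.", top_k=2)
#   # -> [{"score": ..., "token": ..., "token_str": "capital", "sequence": ...}, ...]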
| 100
| 0
|
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by mapping each channel value c to 128 + factor * (c - 128)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental transform applied to every pixel value."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
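# Worked example of the contrast factor (arithmetic only, no new behaviour):
# for level = 170, factor = (259 * 425) / (255 * 89) = 110075 / 22695 ≈ 4.85,
# so mid-gray (128) is unchanged while values away from 128 are pushed toward 0 or 255.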
| 79
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 79
| 1
|
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
| 22
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 1
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 24
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 24
| 1
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
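# A short usage sketch (checkpoint name taken from the vocab map above): because every
# vocabulary entry is registered as a no-split token, a raw protein string such as
# "MKTLLV" is segmented into one token per residue and wrapped as <cls> ... <eos>.
#
#   from transformers import EsmTokenizer
#
#   tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   tokenizer("MKTLLV").input_ids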
| 239
|
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
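# Why 6k +/- 1 suffices (a short derivation, not extra behaviour): every integer can be
# written as 6k + r with r in {0, 1, 2, 3, 4, 5}; r in {0, 2, 4} is divisible by 2 and
# r = 3 by 3, so after the explicit 2/3 checks above only candidates of the form
# 6k - 1 (i.e. i) and 6k + 1 (i.e. i + 2) up to sqrt(number) remain to be trial-divided.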
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 239
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
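# A minimal usage sketch (illustrative only; calling the tool triggers setup, which
# downloads the microsoft/speecht5_tts and speecht5_hifigan weights named above):
#
#   tool = TextToSpeechTool()
#   waveform = tool("Hello world")  # 1-D tensor of audio samples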
| 179
|
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_snake_case : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class A ( _a ):
def __init__( self : Dict , lowerCAmelCase_ : CLIPSegForImageSegmentation , lowerCAmelCase_ : CLIPSegProcessor , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : CLIPTextModel , lowerCAmelCase_ : CLIPTokenizer , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase_ : StableDiffusionSafetyChecker , lowerCAmelCase_ : CLIPImageProcessor , ) -> int:
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
_a = (
F'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
F' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ )
_a = dict(scheduler.config )
_a = 1
_a = FrozenDict(lowerCAmelCase_ )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                F'The configuration file of this scheduler: {scheduler} has not set the configuration'
                ''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
                ''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
                ''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
                ''' Hub, it would be very nice if you could open a Pull request for the'''
                ''' `scheduler/scheduler_config.json` file'''
            )
            deprecate('''skip_prk_steps not set''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''skip_prk_steps'''] = True
            scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self : str , slice_size : Optional[Union[str, int]] = "auto" ) -> Any:
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self : Union[str, Any] ) -> Any:
        """simple docstring"""
        self.enable_attention_slicing(None )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device('''cuda''' )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__( self : List[Any] , prompt : Union[str, List[str]] , image : Union[torch.FloatTensor, PIL.Image.Image] , text : str , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs : Any , ) -> Tuple:
        """simple docstring"""
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
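
# --- Hedged usage sketch (not part of the original file) ---
# __call__ above first segments `image` with CLIPSeg using the `text` prompt,
# then inpaints the masked region with `prompt`. The checkpoint below is a
# common choice for CLIPSeg, assumed rather than taken from this file:
#
#   seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   # ...load vae/text_encoder/tokenizer/unet/scheduler/safety_checker/feature_extractor
#   # from a Stable Diffusion inpainting checkpoint, build the pipeline, then:
#   result = pipe(prompt="a red couch", image=init_image, text="the sofa")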
| 179
| 1
|
"""simple docstring"""
def solution(limit=2_8_1_2_3 ) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
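
# Small sanity check (an added sketch, not part of the original solution): 12 is
# the smallest abundant number, so 24 = 12 + 12 is the smallest integer that can
# be written as the sum of two abundant numbers, and everything below 24 counts.
assert solution(limit=23) == sum(range(1, 24))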
if __name__ == "__main__":
print(solution())
| 217
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__A = logging.get_logger(__name__)
class snake_case ( BaseImageProcessor ):
    model_input_names : List[str] = ["pixel_values"]

    def __init__( self : List[Any] , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_5_5 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs : Optional[int] , )-> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 2_5_6}
        size = get_size_dict(size , default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
        crop_size = get_size_dict(crop_size , param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self : Tuple , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Dict , )-> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False)
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs)
    def center_crop( self : Optional[int] , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Tuple , )-> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs)
    def rescale( self : str , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[int])-> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs)
    def normalize( self : Dict , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[Any] , )-> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)
    def preprocess( self : Union[str, Any] , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs : Optional[Any] , )-> Dict:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors)
    def post_process_semantic_segmentation( self : int , outputs : List[str] , target_sizes : List[Tuple] = None)-> Dict:
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="bilinear" , align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
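
# --- Hedged usage sketch (not part of the original file) ---
# A minimal round trip through the processor above (the class keeps its
# obfuscated name `snake_case` in this file):
#
#   import numpy as np
#   processor = snake_case()                          # resize->256, crop->224
#   dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor(images=dummy, return_tensors="pt")
#   print(batch["pixel_values"].shape)                # (1, 3, 224, 224)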
| 217
| 1
|
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
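
# --- Hedged usage sketch (not part of the original file) ---
# Initializing and applying the resnet block above on dummy data (shapes are
# illustrative; flax modules build their parameters from example inputs):
#
#   rng = jax.random.PRNGKey(0)
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   hidden = jnp.zeros((1, 8, 8, 32))     # NHWC layout, as used by these modules
#   temb = jnp.zeros((1, 128))            # time embedding
#   params = block.init(rng, hidden, temb)
#   out = block.apply(params, hidden, temb)   # -> shape (1, 8, 8, 64)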
| 30
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    """simple docstring"""
    output = {}
    output["""input_ids"""] = tokenizer(example["""content"""] ,truncation=False )["""input_ids"""]
    output["""ratio_char_token"""] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 30
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
def shape_list ( tensor ) -> List[int]:
    '''simple docstring'''
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def stable_softmax ( logits , axis = None , name = None ) -> tf.Tensor:
    '''simple docstring'''
    return tf.nn.softmax(logits=logits + 1e-9 , axis=axis , name=name )
def functional_layernorm ( inputs , weight , bias , epsilon=1e-5 , axis=-1 ) -> List[Any]:
    '''simple docstring'''
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
    # Get mean and variance on the axis to be normalized
    mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
def flatten ( input , start_dim=0 , end_dim=-1 ) -> Optional[Any]:
    '''simple docstring'''
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
def invert_attention_mask ( encoder_attention_mask ) -> tf.Tensor:
    '''simple docstring'''
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds ( tensor , embed_dim , tensor_name = "input_ids" ) -> None:
    '''simple docstring'''
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
            F'''The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding '''
            F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
        ) , )
def save_attributes_to_hdf5_group ( group , name , data ) -> Optional[Any]:
    '''simple docstring'''
    HDF5_OBJECT_HEADER_LIMIT = 6_4512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
            F'''bytes: {bad_attributes}''' )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group ( group , name ) -> Tuple:
    '''simple docstring'''
    if name in group.attrs:
        data = [n.decode("utf8" ) if hasattr(n , "decode" ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8" ) if hasattr(n , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
            chunk_id += 1
    return data
def expand_1d ( data ) -> Optional[int]:
    '''simple docstring'''
    def _expand_single_1d_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor , data )
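
# --- Hedged usage sketch (not part of the original file) ---
# `shape_list` returns static dimensions where known and dynamic tensors where
# not, which is the reason it exists:
#
#   x = tf.keras.Input(shape=(None, 768))   # batch and sequence dims are dynamic
#   dims = shape_list(x)                    # [<dynamic>, <dynamic>, 768]
#
# `flatten` mirrors torch.flatten: flatten(t, start_dim=1) reshapes (B, H, W, C)
# into (B, H*W*C) while keeping dynamic dims symbolic.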
| 79
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _UpperCAmelCase ( PretrainedConfig ):
    """simple docstring"""

    model_type = '''canine'''

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=16384 , type_vocab_size=16 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0xE000 , eos_token_id=0xE001 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=16384 , local_transformer_stride=128 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 79
| 1
|
def is_arithmetic_series(series):
    """simple docstring"""
    if not isinstance(series , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(series ) == 0:
        raise ValueError('''Input list must be a non empty list''' )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series):
    """simple docstring"""
    if not isinstance(series , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(series ) == 0:
        raise ValueError('''Input list must be a non empty list''' )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
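
# Illustrative checks (an added sketch, not part of the original module):
assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([2, 4, 7]) is False
assert arithmetic_mean([2, 4, 6]) == 4.0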
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[int] = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class A__ ( PretrainedConfig ):
    """simple docstring"""

    model_type = 'efficientnet'
    def __init__( self , num_channels = 3 , image_size = 6_0_0 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , out_channels = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2_5_6_0 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , dropout_rate = 0.5 , drop_connect_rate = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig ( OnnxConfig ):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation ( self ):
return 1E-5
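
# --- Hedged usage sketch (not part of the original file) ---
# Instantiating the config above with its defaults; EfficientNet scaling comes
# from width_coefficient/depth_coefficient rather than explicit layer lists:
#
#   config = A__()                      # the (obfuscated) EfficientNet config class
#   assert config.image_size == 6_0_0
#   assert config.num_hidden_layers == sum(config.num_block_repeats) * 4  # == 64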
| 213
| 0
|
from manim import *
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def a (self : List[str] ):
"""simple docstring"""
__snake_case = Rectangle(height=0.5 , width=0.5 )
__snake_case = Rectangle(height=0.2_5 , width=0.2_5 )
__snake_case = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
__snake_case = Text('''CPU''' , font_size=24 )
__snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a__ )
__snake_case = [mem.copy() for i in range(4 )]
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = Text('''GPU''' , font_size=24 )
__snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
gpu.move_to([-1, -1, 0] )
self.add(a__ )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = Text('''Model''' , font_size=24 )
__snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
model.move_to([3, -1.0, 0] )
self.add(a__ )
__snake_case = []
__snake_case = []
__snake_case = []
for i, rect in enumerate(a__ ):
rect.set_stroke(a__ )
__snake_case = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=a__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a__ , buff=0.0 )
self.add(a__ )
model_cpu_arr.append(a__ )
self.add(*a__ , *a__ , *a__ )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = Text('''Loaded Checkpoint''' , font_size=24 )
__snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(a__ )
__snake_case = []
__snake_case = []
for i, rect in enumerate(a__ ):
__snake_case = fill.copy().set_fill(a__ , opacity=0.7 )
target.move_to(a__ )
ckpt_arr.append(a__ )
__snake_case = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(a__ )
self.add(*a__ , *a__ )
__snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__snake_case = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a__ , a__ )
__snake_case = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a__ )
__snake_case = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
__snake_case = [meta_mem.copy() for i in range(6 )]
__snake_case = [meta_mem.copy() for i in range(6 )]
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = VGroup(*a__ ).arrange(a__ , buff=0 )
__snake_case = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
__snake_case = Text('''Disk''' , font_size=24 )
__snake_case = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(a__ , run_time=3 ) , Write(a__ , run_time=1 ) , Create(a__ , run_time=1 ) )
__snake_case = []
for i, rect in enumerate(a__ ):
__snake_case = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(a__ , run_time=1.5 ) )
self.play(*a__ )
self.play(FadeOut(a__ ) )
__snake_case = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ , run_time=3 ) )
self.play(
FadeOut(a__ , a__ , *a__ , *a__ ) , )
self.wait()
| 24
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class : List[Any] = BartphoTokenizer
    test_rust_tokenizer : List[str] = False
    test_sentencepiece : Optional[Any] = True
    def setUp (self : Tuple ):
        """simple docstring"""
        super().setUp()
        vocab = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
        with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            for token in vocab_tokens:
                fp.write(f"""{token} {vocab_tokens[token]}\n""" )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer (self : str , **kwargs : str ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts (self : str , tokenizer : Any ):
        """simple docstring"""
        input_text = '''This is a là test'''
        output_text = '''This is a<unk><unk> test'''
        return input_text, output_text
    def test_full_tokenizer (self : Dict ):
        """simple docstring"""
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = '''This is a là test'''
        bpe_tokens = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 24
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_whisper_fast'''] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_whisper'''] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_whisper'''] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_whisper'''] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 365
|
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(number_picks = 20 ):
    """simple docstring"""
    total = math.comb(NUM_BALLS , number_picks )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , number_picks )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F"""{result:.9f}"""
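
# Sanity note (an added sketch): by linearity of expectation the result equals
# NUM_COLOURS * (1 - C(NUM_BALLS - BALLS_PER_COLOUR, picks) / C(NUM_BALLS, picks)),
# so drawing all 70 balls must yield exactly 7 distinct colours:
assert solution(70) == "7.000000000"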
if __name__ == "__main__":
print(solution(20))
| 19
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_longformer_fast"""] = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_longformer"""] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_longformer"""] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 179
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
a_ = NewType("""DataClass""", Any)
a_ = NewType("""DataClassType""", Any)
def string_to_bool ( v ) -> Union[bool, str]:
    '''simple docstring'''
    if isinstance(v ,bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def make_choice_type_function ( choices: list ) -> Callable[[str], Any]:
    '''simple docstring'''
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg ,arg )
def HfArg ( *,
    aliases = None ,help = None ,default = dataclasses.MISSING ,default_factory = dataclasses.MISSING ,metadata = None ,**kwargs ,) -> dataclasses.Field:
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['''aliases'''] = aliases
    if help is not None:
        metadata['''help'''] = help
    return dataclasses.field(metadata=metadata ,default=default ,default_factory=default_factory ,**kwargs )
class __snake_case ( ArgumentParser ):
    """simple docstring"""

    dataclass_types: Iterable[DataClassType]

    def __init__( self , dataclass_types , **kwargs ):
        '''simple docstring'''
        if "formatter_class" not in kwargs:
            kwargs['''formatter_class'''] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
    @staticmethod
    def _parse_dataclass_field( parser , field ):
        '''simple docstring'''
        field_name = F"""--{field.name}"""
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )
        aliases = kwargs.pop('''aliases''' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '''__origin__''' , field.type )
        if origin_type is Union or (hasattr(types , '''UnionType''' ) and isinstance(origin_type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    F""" Problem encountered in field '{field.name}'.""" )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '''__origin__''' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]
            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''] )
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(F"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype ):
        '''simple docstring'''
        if hasattr(dtype , '''_argument_group_name''' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '''.'''.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ):
        '''simple docstring'''
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='''append''' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('''-''' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
            return (*outputs,)
    def parse_dict( self , args , allow_extra_keys = False ):
        '''simple docstring'''
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}""" )
        return tuple(outputs )
    def parse_json_file( self , json_file , allow_extra_keys = False ):
        '''simple docstring'''
        with open(Path(json_file ) , encoding='''utf-8''' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file , allow_extra_keys = False ):
        '''simple docstring'''
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
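
# --- Hedged usage sketch (not part of the original file) ---
# Typical use of the parser class above (upstream it is `HfArgumentParser`; here
# the class carries an obfuscated name). Field and flag names are illustrative:
#
#   @dataclasses.dataclass
#   class TrainingConfig:
#       learning_rate: float = HfArg(default=3e-4, help="Peak LR.", aliases=["--lr"])
#       fp16: bool = False
#
#   parser = __snake_case(TrainingConfig)
#   (cfg,) = parser.parse_args_into_dataclasses(args=["--lr", "1e-4", "--fp16"])
#   assert cfg.learning_rate == 1e-4 and cfg.fp16 is True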
| 179
| 1
|
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function ( z ):
    '''simple docstring'''
    return 1 / (1 + np.exp(-z ))
def cost_function ( h ,y ):
    '''simple docstring'''
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood ( x ,y ,weights ):
    '''simple docstring'''
    scores = np.dot(x ,weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg ( alpha ,x ,y ,max_iterations=70_000 ):
    '''simple docstring'''
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x ,theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T ,h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x ,theta )
        h = sigmoid_function(z )
        j = cost_function(h ,y )
        if iterations % 100 == 0:
            print(f"loss: {j} \t" )  # printing the loss after every 100 iterations
    return theta
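
# Quick gradient-descent sanity check (an added sketch, not in the original
# notebook): on a trivially separable one-feature problem the learned weight
# must come out positive.
_w = logistic_reg(0.1, np.array([[-1.0], [1.0]]), np.array([0, 1]), max_iterations=100)
assert _w[0] > 0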
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0)
print('theta: ', theta) # printing the theta i.e our weights vector
def predict_prob ( x ):
    '''simple docstring'''
    return sigmoid_function(
        np.dot(x ,theta ) )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(1_0, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
(x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
(x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 243
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters ( model ):
    '''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad ,model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback ( output_dir ,metric ):
    '''simple docstring'''
    if metric == "rouge2":
        exp = """{val_avg_rouge2:.4f}-{step_count}"""
    elif metric == "bleu":
        exp = """{val_avg_bleu:.4f}-{step_count}"""
    elif metric == "em":
        exp = """{val_avg_em:.4f}-{step_count}"""
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            """ function.""" )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir ,filename=exp ,monitor=f"val_{metric}" ,mode="""max""" ,save_top_k=3 ,every_n_epochs=1 ,)
    return checkpoint_callback
def get_early_stopping_callback ( metric ,patience ):
    '''simple docstring'''
    return EarlyStopping(
        monitor=f"val_{metric}" ,mode="""min""" if """loss""" in metric else """max""" ,patience=patience ,verbose=True ,)
class UpperCAmelCase__ ( pl.Callback):
    def on_batch_end ( self , trainer , pl_module ):
        lrs = {f"lr_group_{i}": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs ( self , trainer , pl_module , type_path , save_generations=True ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / """test_results.txt"""
            generations_file = od / """test_generations.txt"""
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , """a+""" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = """\n""".join(metrics["""preds"""] )
            generations_file.open("""w+""" ).write(content )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> str:
try:
__UpperCamelCase = pl_module.model.model.num_parameters()
except AttributeError:
__UpperCamelCase = pl_module.model.num_parameters()
__UpperCamelCase = count_trainable_parameters(lowercase )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> Optional[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowercase , lowercase , """test""" )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> int:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 243
| 1
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'MobileNetV1Config'
# Base docstring
_CHECKPOINT_FOR_DOC = 'google/mobilenet_v1_1.0_224'
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'google/mobilenet_v1_1.0_224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'google/mobilenet_v1_1.0_224',
    'google/mobilenet_v1_0.75_192',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    '''simple docstring'''
    tf_to_pt_map = {}
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    '''simple docstring'''
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d):
    '''simple docstring'''
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
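# Worked example of the TF "SAME" padding computed above, assuming a 3x3 kernel
# with stride 2 on an 8x8 input: 8 % 2 == 0, so pad_along = max(3 - 2, 0) = 1,
# giving pad_top/pad_left = 0 and pad_bottom/pad_right = 1. TF puts the extra
# pixel on the bottom/right, unlike PyTorch's symmetric padding.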
class MobileNetVaConvLayer(nn.Module):
    """simple docstring"""
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """simple docstring"""
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = 'mobilenet_v1'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.',
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    """simple docstring"""
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2,
        )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=out_channels, kernel_size=1,
                )
            )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    '\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ',
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    """simple docstring"""
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states,
        )
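# Channel-schedule note for the backbone above: `depth` starts at 32 and doubles on
# the stem and on strided layers, scaled by config.depth_multiplier and floored at
# config.min_depth; e.g. with depth_multiplier=0.5 and min_depth=8 the stem width
# is max(int(32 * 0.5), 8) == 16.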
| 30
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    """simple docstring"""
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding."
            )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    'weight_decay': 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.')
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps)
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop('labels')
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)
        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'], attention_mask=inputs['attention_mask'], **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs['max_length'])
        labels = inputs.pop('labels')
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs['max_length'])
        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                f' padded to `max_length`={max_length}')
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
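    # Sketch of what _pad_tensors_to_max_len does, with assumed toy values: a batch
    # of generated ids [[5, 6, 7]] padded to max_length=5 with pad_token_id=0
    # becomes [[5, 6, 7, 0, 0]], so generations and labels can be stacked across
    # batches of different lengths.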
| 30
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_A : Union[str, Any] =logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""", FutureWarning, )
        super().__init__(*args, **kwargs)
| 129
|
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''')
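# Usage sketch: resolve an activation module by its config string and apply it.
#   act = get_activation("silu")
#   y = act(torch.randn(2, 4))  # elementwise SiLU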
| 129
| 1
|
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """simple docstring"""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 95
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip('-'): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=False)
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers')
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
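# Example invocation (hypothetical dataset path); "--key value" pairs the parser
# does not recognize are folded into kwargs by parse_unknown_args above:
#   datasets-cli test ./datasets/my_dataset --save_infos --all_configs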
| 213
| 0
|
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """simple docstring"""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
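# Sketch of the idea (spiral primes): a square spiral of side length j has
# 2 * j - 1 numbers on its diagonals, and the range(j * j + j + 1, (j + 2) ** 2,
# j + 1) above enumerates the three non-square corners of the next ring. The loop
# keeps widening the spiral until the fraction of primes on the diagonals drops
# below `ratio`, then returns the side length, e.g. solution(0.5).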
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    """simple docstring"""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            """Number of removed nested coreferring mentions in the key """
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            """Number of resulting singleton clusters in the key """
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            """files, respectively""")
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """simple docstring"""
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}", )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"""conll_score""": conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    """simple docstring"""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("""#"""):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        metrics = [
            ("""mentions""", evaluator.mentions),
            ("""muc""", evaluator.muc),
            ("""bcub""", evaluator.b_cubed),
            ("""ceafe""", evaluator.ceafe),
            ("""lea""", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
        return score
| 220
| 0
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        '''simple docstring'''
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        '''simple docstring'''
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True

    def init_retrieval(self):
        '''simple docstring'''
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        '''simple docstring'''
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])
    def init_retrieval(self):
'''simple docstring'''
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
@classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)
@classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
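# Sketch of the dispatch pattern used by retrieve() above: queries are
# load-balanced by picking one Ray actor at random and blocking on its result.
#   worker = retrieval_workers[random.randint(0, len(retrieval_workers) - 1)]
#   doc_ids, doc_embeds = ray.get(worker.retrieve.remote(question_hidden_states, n_docs))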
| 205
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 19
| 0
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("""One and only one argument must be 0""")
    if force < 0:
        raise ValueError("""Magnitude of force can not be negative""")
    if distance < 0:
        raise ValueError("""Distance can not be negative""")
    if area < 0:
        raise ValueError("""Area can not be negative""")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("""One and only one argument must be 0""")
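# Usage sketch: pass the unknown quantity as 0 and read it back from the dict.
#   casimir_force(force=0, area=4.0, distance=0.03)     # -> {"force": ...}
#   casimir_force(force=2.0e-7, area=0, distance=0.03)  # -> {"area": ...}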
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 343
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
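# A minimal usage sketch (requires network access to the Hugging Face Hub on
# first call): the fast tokenizer above behaves like any BERT-style tokenizer.
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     print(tokenizer("Hello world")["input_ids"])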
| 243
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''')
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token), client_kwargs={"trust_env": True}, ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 243
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """simple docstring"""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """simple docstring"""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
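# A quick self-contained check (illustrative only) of the identity used above,
# ||x - c||^2 = ||x||^2 - 2 x.c + ||c||^2, against a direct broadcast computation:
if __name__ == "__main__":
    _x = np.random.rand(5, 3)
    _c = np.random.rand(4, 3)
    _d = squared_euclidean_distance(_x, _c)
    _ref = ((_x[:, None, :] - _c[None, :, :]) ** 2).sum(-1)
    assert np.allclose(_d, _ref)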
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}')
        return resize(
            image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """simple docstring"""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_color_quantize and clusters is None:
            raise ValueError('Clusters must be specified if do_color_quantize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 354
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
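# A minimal sketch of what this lazy-import structure enables (assuming this
# file is the `transformers.models.luke` __init__): the torch-backed classes
# listed above are only imported on first attribute access.
#
#     from transformers import LukeConfig  # cheap: no torch import triggered
#     config = LukeConfig()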
| 93
| 0
|
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    '''simple docstring'''
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split('''b''')[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
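    # Sanity check (illustrative): below 10, the numbers 1, 3, 5, 7 and 9 are
    # palindromic in both base 10 and base 2, so solution(10) must equal 25.
    assert solution(10) == 25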
| 129
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 129
| 1
|
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_mst():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # the MST is unique here, but edge order is not, hence the sorted comparison
    assert sorted(result) == sorted(expected)
| 74
|
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """simple docstring"""
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo, mode='''rb''', protocol=target_protocol, compression=self.compression, client_kwargs={
                '''requote_redirect_url''': False,  # see https://github.com/huggingface/datasets/pull/5459
                '''trust_env''': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('''client_kwargs''', {}),  # To avoid issues if it was already passed.
            }, **(target_options or {}), )
        self.compressed_name = os.path.basename(self.file.path.split('''::''')[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('''.''')]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip('''/''')

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), '''name''': self.uncompressed_name}
            self.dir_cache = {f['''name''']: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(F'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'')
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs, )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """simple docstring"""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
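# A minimal usage sketch, taken from the protocol comment above: chained fsspec
# URLs let the filesystems defined here decompress a file transparently
# (hypothetical paths).
#
#     import fsspec
#     with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", "rb") as f:
#         data = f.read()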
| 74
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split('_')
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'swin.' + name
    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_', '-')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')
    timm_outs = timm_model(inputs['pixel_values'])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(F'Saving model {swin_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
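    # Example invocation (hypothetical script/output paths; the argument names
    # are the ones defined above):
    #
    #     python convert_swin_timm_to_pytorch.py \
    #         --swin_name swin_tiny_patch4_window7_224 \
    #         --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224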
| 74
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self):
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode='trunc'),
            ], axis=1, )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords):
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ], dim=2, )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width, height):
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov, )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    '''simple docstring'''
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(), x=torch.from_numpy(np.stack(xs, axis=0)).float(), y=torch.from_numpy(np.stack(ys, axis=0)).float(), z=torch.from_numpy(np.stack(zs, axis=0)).float(), width=size, height=size, x_fov=0.7, y_fov=0.7, shape=(1, len(xs)), )
| 220
| 0
|
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    '''simple docstring'''
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    '''simple docstring'''
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)

    def _sanitize_parameters(self, return_tensors=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''')
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length, min_length, max_length):
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ''''''
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''')
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""")
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs['''input_ids'''].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs['''input_ids''']).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get('''min_length''', self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get('''max_length''', self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs['''min_length'''], generate_kwargs['''max_length'''])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {F"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    F"""{self.return_name}_text""": self.tokenizer.decode(
                        output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    '''simple docstring'''
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length, min_length, max_length) -> bool:
        if max_length < min_length:
            logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""")
        if input_length < max_length:
            logger.warning(
                F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
                '''a summarization task, where outputs shorter than the input are typically wanted, you might '''
                F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""")


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    '''simple docstring'''
    return_name = "translation"

    def check_inputs(self, input_length, min_length, max_length):
        if input_length > 0.9 * max_length:
            logger.warning(
                F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
                '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''')
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, '''_build_translation_inputs''', None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get('''task''', self.task)
            items = task.split('''_''')
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
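# A minimal usage sketch of the pipelines above (hedged: `pipeline(...)` is the
# standard transformers factory and downloads a default model on first use):
#
#     from transformers import pipeline
#     summarizer = pipeline("summarization")
#     print(summarizer("A long article to condense ...", min_length=5, max_length=20))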
| 370
|
'''simple docstring'''
def solution():
    '''simple docstring'''
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
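    # Cross-check with the standard library (should print the same count of
    # first-of-month Sundays in 1901-2000 as the brute-force loop above):
    import calendar
    print(sum(
        calendar.weekday(year, month, 1) == calendar.SUNDAY
        for year in range(1901, 2001)
        for month in range(1, 13)
    ))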
| 214
| 0
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A : Optional[int] = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """simple docstring"""
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"])
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = '''huggingface/label-files'''
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = '''maskformer-ade20k-full-id2label.json'''
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = '''ade20k-id2label.json'''
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = '''maskformer-coco-stuff-id2label.json'''
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = '''coco-panoptic-id2label.json'''
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = '''cityscapes-id2label.json'''
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = '''mapillary-vistas-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    config.id2label = {int(k): v for k, v in idalabel.items()}
    return config
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim :, :
            ]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Split the fused decoder attention in_proj matrices into query/key/value entries."""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
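# Both helpers above rely on the same slicing convention. The sketch below is a
# hypothetical helper (not part of the original script) that verifies the convention
# on a tiny tensor: q/k/v rows are stacked in that order, as in torch.nn.MultiheadAttention.
def _fused_qkv_split_example():
    hidden = 4
    fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = fused[:hidden], fused[hidden : hidden * 2], fused[-hidden:]
    assert torch.equal(torch.cat([q, k, v]), fused)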
def prepare_img():
    """Download a COCO image on which we'll verify the converted model's outputs."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """Copy/paste/tweak the original MaskFormer checkpoint's weights to our MaskFormer structure."""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
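# Example invocation (illustrative only; the script filename and paths below are
# assumptions -- point --checkpoint_path at your own pickled original checkpoint):
#
#   python convert_maskformer_swin_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade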
| 184
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True,
                 use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
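        # With the defaults above: image_size // 32 = 64 // 32 = 2, so the backbone feature
        # map is 2 * 2 = 4 patches, and seq_length = 4 + 1 = 5 once the [CLS] token is counted.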
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 336
| 0
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
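# Illustration only: the data-parallel pattern used by both tests above, in miniature.
# Model params are replicated onto every local device, while array inputs are sharded
# along the leading axis (one slice per device). The helper name is local to this sketch.
def _replicate_and_shard_example(params, batch):
    assert batch.shape[0] % jax.device_count() == 0, "batch must split evenly across devices"
    p_params = replicate(params)  # same pytree copied onto each device
    p_batch = shard(batch)        # leading dim reshaped to (num_devices, per_device, ...)
    return p_params, p_batch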
| 288
|
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
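# Why the ".T" on every kernel above: T5X/Flax Dense kernels are stored as
# (in_features, out_features), while torch.nn.Linear.weight is (out_features, in_features).
# A minimal sketch of the idea (shapes arbitrary, names local to this example):
def _kernel_layout_example():
    kernel = onp.zeros((512, 2048))  # Flax layout: (in, out)
    linear = nn.Linear(512, 2048, bias=False)
    linear.weight = nn.Parameter(torch.FloatTensor(kernel.T))  # torch layout: (out, in)
    return linear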
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
__lowerCAmelCase = parser.parse_args()
main(args)
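# Example invocation (illustrative only; assumes this file is saved as
# convert_music_spectrogram_to_diffusers.py and that a T5X checkpoint exists locally):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion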
| 288
| 1
|
"""simple docstring"""
class lowercase__ :
def __init__( self : List[str] , snake_case__ : Tuple , snake_case__ : List[str]=None , snake_case__ : Optional[int]=None ):
lowerCamelCase_ : Optional[Any] =data
lowerCamelCase_ : List[Any] =previous
lowerCamelCase_ : int =next_node
def __str__( self : Tuple ):
return F"""{self.data}"""
def UpperCAmelCase__ ( self : int ):
return self.data
def UpperCAmelCase__ ( self : List[str] ):
return self.next
def UpperCAmelCase__ ( self : Optional[int] ):
return self.previous
class lowercase__ :
def __init__( self : int , snake_case__ : Optional[Any] ):
lowerCamelCase_ : Any =head
def __iter__( self : Optional[Any] ):
return self
def UpperCAmelCase__ ( self : Optional[int] ):
if not self.current:
raise StopIteration
else:
lowerCamelCase_ : Union[str, Any] =self.current.get_data()
lowerCamelCase_ : Optional[Any] =self.current.get_next()
return value
class lowercase__ :
def __init__( self : List[Any] ):
lowerCamelCase_ : Dict =None # First node in list
lowerCamelCase_ : Optional[int] =None # Last node in list
def __str__( self : str ):
lowerCamelCase_ : Any =self.head
lowerCamelCase_ : List[Any] =[]
while current is not None:
nodes.append(current.get_data() )
lowerCamelCase_ : str =current.get_next()
return " ".join(str(__SCREAMING_SNAKE_CASE ) for node in nodes )
def __contains__( self : List[Any] , snake_case__ : List[str] ):
lowerCamelCase_ : Optional[Any] =self.head
while current:
if current.get_data() == value:
return True
lowerCamelCase_ : int =current.get_next()
return False
def __iter__( self : List[str] ):
return LinkedListIterator(self.head )
def UpperCAmelCase__ ( self : Optional[Any] ):
if self.head:
return self.head.get_data()
return None
def UpperCAmelCase__ ( self : Any ):
if self.tail:
return self.tail.get_data()
return None
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Union[str, Any] ):
if self.head is None:
lowerCamelCase_ : int =node
lowerCamelCase_ : List[Any] =node
else:
self.insert_before_node(self.head , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Dict ):
if self.head is None:
self.set_head(__SCREAMING_SNAKE_CASE )
else:
self.insert_after_node(self.tail , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : List[Any] ):
lowerCamelCase_ : Dict =Node(__SCREAMING_SNAKE_CASE )
if self.head is None:
self.set_head(__SCREAMING_SNAKE_CASE )
else:
self.set_tail(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Any ):
lowerCamelCase_ : Dict =node
lowerCamelCase_ : List[str] =node.previous
if node.get_previous() is None:
lowerCamelCase_ : Union[str, Any] =node_to_insert
else:
lowerCamelCase_ : Optional[Any] =node_to_insert
lowerCamelCase_ : Any =node_to_insert
def UpperCAmelCase__ ( self : Dict , snake_case__ : str , snake_case__ : List[str] ):
lowerCamelCase_ : Optional[Any] =node
lowerCamelCase_ : Dict =node.next
if node.get_next() is None:
lowerCamelCase_ : Union[str, Any] =node_to_insert
else:
lowerCamelCase_ : Any =node_to_insert
lowerCamelCase_ : Union[str, Any] =node_to_insert
def UpperCAmelCase__ ( self : Tuple , snake_case__ : List[Any] , snake_case__ : Optional[int] ):
lowerCamelCase_ : List[str] =1
lowerCamelCase_ : Tuple =Node(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : Dict =self.head
while node:
if current_position == position:
self.insert_before_node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return
current_position += 1
lowerCamelCase_ : List[Any] =node.next
self.insert_after_node(self.tail , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : str ):
lowerCamelCase_ : Optional[Any] =self.head
while node:
if node.get_data() == item:
return node
lowerCamelCase_ : Tuple =node.get_next()
raise Exception("Node not found" )
def UpperCAmelCase__ ( self : Dict , snake_case__ : int ):
if (node := self.get_node(__SCREAMING_SNAKE_CASE )) is not None:
if node == self.head:
lowerCamelCase_ : Union[str, Any] =self.head.get_next()
if node == self.tail:
lowerCamelCase_ : List[Any] =self.tail.get_previous()
self.remove_node_pointers(__SCREAMING_SNAKE_CASE )
@staticmethod
def UpperCAmelCase__ ( snake_case__ : Tuple ):
if node.get_next():
lowerCamelCase_ : str =node.previous
if node.get_previous():
lowerCamelCase_ : List[Any] =node.next
lowerCamelCase_ : Optional[Any] =None
lowerCamelCase_ : Dict =None
def UpperCAmelCase__ ( self : List[Any] ):
return self.head is None
def _snake_case ( ) -> Dict:
pass
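# Quick usage sketch (illustrative only; exercises the classes defined above):
def _example_usage() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    assert 2 in linked_list
    assert str(linked_list) == "1 2 3"
    linked_list.delete_value(2)
    assert [v for v in linked_list] == [1, 3]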
if __name__ == "__main__":
import doctest
doctest.testmod()
| 144
|
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    """Test cases for the greedy knapsack implementation."""

    def test_sorted(self):
        """kp.calc_profit takes (profit, weight, max_weight) and returns the maximum profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """Raises ValueError for any negative max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """Raises ValueError for any negative weight value."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """Raises ValueError for any negative profit value."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """Raises ValueError for a zero max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Raises IndexError if the profit and weight lists differ in length."""
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 93
| 0
|
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
snake_case : Dict = "Usage of script: script_name <size_of_canvas:int>"
snake_case : List[Any] = [0] * 100 + [1] * 10
random.shuffle(choice)
def lowerCAmelCase_ ( _snake_case : int ) -> list[list[bool]]:
'''simple docstring'''
__magic_name__ : int = [[False for i in range(_snake_case )] for j in range(_snake_case )]
return canvas
def lowerCAmelCase_ ( _snake_case : list[list[bool]] ) -> None:
'''simple docstring'''
for i, row in enumerate(_snake_case ):
for j, _ in enumerate(_snake_case ):
__magic_name__ : Optional[int] = bool(random.getrandbits(1 ) )
def lowerCAmelCase_ ( _snake_case : list[list[bool]] ) -> list[list[bool]]:
'''simple docstring'''
__magic_name__ : List[Any] = np.array(_snake_case )
__magic_name__ : Tuple = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(_snake_case ):
for c, pt in enumerate(_snake_case ):
__magic_name__ : Any = __judge_point(
_snake_case , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__magic_name__ : Tuple = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__magic_name__ : list[list[bool]] = current_canvas.tolist()
return return_canvas
def lowerCAmelCase_ ( _snake_case : bool , _snake_case : list[list[bool]] ) -> bool:
'''simple docstring'''
__magic_name__ : List[Any] = 0
__magic_name__ : Optional[int] = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
__magic_name__ : Union[str, Any] = pt
if pt:
if alive < 2:
__magic_name__ : Optional[Any] = False
elif alive == 2 or alive == 3:
__magic_name__ : Optional[int] = True
elif alive > 3:
__magic_name__ : Dict = False
else:
if alive == 3:
__magic_name__ : List[str] = True
return state
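# Tiny sanity sketch (illustrative): a live focus cell with exactly two live neighbours
# survives. The 3x3 slice below contains the focus cell itself, which __judge_point
# subtracts back out of the count:
#
#   __judge_point(True, [[True, False, False], [False, True, False], [False, True, False]])  # -> True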
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
snake_case : List[Any] = int(sys.argv[1])
# main working structure of this module.
snake_case : int = create_canvas(canvas_size)
seed(c)
snake_case : Union[str, Any] = plt.subplots()
fig.show()
snake_case : List[Any] = ListedColormap(["w", "k"])
try:
while True:
snake_case : Tuple = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 371
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
snake_case : List[str] = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : int = ["BeitFeatureExtractor"]
snake_case : Optional[int] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41
| 0
|
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
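# Usage sketch (illustrative): once a filesystem above is registered with fsspec under
# its `protocol`, chained URLs read straight through the compression layer, e.g.
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::/tmp/file.txt.gz", "rb") as f:
#       data = f.read()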
| 74
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Heun's second-order scheduler (algorithm 1 of Karras et al. 2022)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps: int) -> torch.FloatTensor:
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
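# Note on the control flow above: Heun's method consumes two `step()` calls per output
# timestep. The first call (`state_in_first_order` is True) stores the slope, dt and
# sample; the second averages the stored slope with the new one and clears the state,
# so the scheduler alternates between first- and second-order mode on successive calls.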
| 74
| 1
|
import math
import sys

import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
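# Note on the kernel size normalization above: k + abs(k % 2 - 1) bumps an even k to the
# next odd value and leaves odd k unchanged (e.g. 4 -> 5, 5 -> 5), so the filter window
# always has a well-defined center pixel.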
if __name__ == "__main__":
A__ , A__ , A__ , A__ = parse_args(sys.argv)
A__ = cva.imread(filename, 0)
cva.imshow('''input image''', img)
A__ = img / 255
A__ = out.astype('''float32''')
A__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
A__ = out * 255
A__ = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 44
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original PoolFormer weights into the HuggingFace structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
A__ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
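# Example invocation (the checkpoint path is a placeholder; weights come from the
# original PoolFormer release):
#   python convert_poolformer_original_to_pytorch.py --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar --pytorch_dump_folder_path ./poolformer_s12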
| 44
| 1
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
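    # Illustrative behaviour (assuming a pad id of 1): padding a (2, 3) tensor to
    # max_length=5 keeps the original values in the first three columns and fills
    # the remaining two columns of every row with the pad token id.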
| 284
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
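# Illustrative usage: instantiating the deprecated class still returns a working
# DPTImageProcessor subclass, but emits a FutureWarning pointing at the new name.
#   feature_extractor = DPTFeatureExtractor()  # warns, then behaves like DPTImageProcessor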
| 214
| 0
|
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the lowest bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
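# Quick sanity check (illustrative): 25 == 0b11001 and 37 == 0b100101 each have
# three set bits, so both counting strategies must agree.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(37) == 3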
def benchmark() -> None:
    """Benchmark code comparing the two set-bit counting implementations."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 162
|
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Simple XOR cipher; a key of 0 falls back to the stored default."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt a string, returning the ciphertext as a list of characters."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Decrypt a list of characters, returning the plaintext as a list of characters."""
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 162
| 1
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 288
|
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
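# Worked example (illustrative): solve(13, 2) == 1, since 2**2 + 3**2 is the only
# way to write 13 as a sum of distinct squares, while solve(100, 2) == 3
# (10**2, 6**2 + 8**2, and 1**2 + 3**2 + 4**2 + 5**2 + 7**2).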
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288
| 1
|
def compute_ap(l):  # noqa: E741
    """Find and print the articulation points of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of the DFS tree is an articulation point only if it has more than one child
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
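# For the sample graph above the run should report 2, 3 and 5 as articulation
# points: removing vertex 2 splits off {0, 1}, {3, 4} and {5, 6, 7, 8}, removing
# 3 isolates vertex 4, and removing 5 disconnects the 6-7-8 cycle from the rest.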
| 362
|
import argparse
import copy
def generate_neighbours(path):
    """Build, for every node, the list of [neighbour, distance] pairs from the input file."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all 2-swap neighbours of `solution`, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list of length `size`."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
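# Expected input format (illustrative): one weighted edge per line, e.g. "a b 20"
# meaning nodes a and b are 20 apart. Example run (the file name is a placeholder):
#   python tabu_search.py -f cities.txt -i 4 -s 3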
| 218
| 0
|