| code (string, lengths 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, lengths 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
def __lowercase ( snake_case, snake_case=False, snake_case=False, snake_case=False ):
"""simple docstring"""
__magic_name__ :List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
__magic_name__ :int = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__magic_name__ :Tuple = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
__magic_name__ :Dict = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ :Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
__magic_name__ :Any = in_proj_bias[: config.hidden_size]
__magic_name__ :Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__magic_name__ :int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__magic_name__ :Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
__magic_name__ :Dict = in_proj_bias[-config.hidden_size :]
def __lowercase ( snake_case ):
"""simple docstring"""
__magic_name__ :str = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[Any] = dct.pop(snake_case )
__magic_name__ :Union[str, Any] = val
@torch.no_grad()
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :Optional[int] = ViltConfig(image_size=3_8_4, patch_size=3_2, tie_word_embeddings=snake_case )
__magic_name__ :List[str] = False
__magic_name__ :List[str] = False
__magic_name__ :List[Any] = False
__magic_name__ :Union[str, Any] = False
if "vqa" in checkpoint_url:
__magic_name__ :Union[str, Any] = True
__magic_name__ :List[str] = 3_1_2_9
__magic_name__ :List[Any] = '''huggingface/label-files'''
__magic_name__ :List[str] = '''vqa2-id2label.json'''
__magic_name__ :Optional[Any] = json.load(open(hf_hub_download(snake_case, snake_case, repo_type='''dataset''' ), '''r''' ) )
__magic_name__ :List[Any] = {int(snake_case ): v for k, v in idalabel.items()}
__magic_name__ :Dict = idalabel
__magic_name__ :Tuple = {v: k for k, v in idalabel.items()}
__magic_name__ :List[str] = ViltForQuestionAnswering(snake_case )
elif "nlvr" in checkpoint_url:
__magic_name__ :Optional[Any] = True
__magic_name__ :List[Any] = 2
__magic_name__ :Tuple = {0: '''False''', 1: '''True'''}
__magic_name__ :List[Any] = {v: k for k, v in config.idalabel.items()}
__magic_name__ :Any = 3
__magic_name__ :Optional[int] = ViltForImagesAndTextClassification(snake_case )
elif "irtr" in checkpoint_url:
__magic_name__ :List[str] = True
__magic_name__ :Tuple = ViltForImageAndTextRetrieval(snake_case )
elif "mlm_itm" in checkpoint_url:
__magic_name__ :Dict = True
__magic_name__ :List[Any] = ViltForMaskedLM(snake_case )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
__magic_name__ :Optional[Any] = torch.hub.load_state_dict_from_url(snake_case, map_location='''cpu''' )['''state_dict''']
__magic_name__ :str = create_rename_keys(snake_case, snake_case, snake_case, snake_case )
for src, dest in rename_keys:
rename_key(snake_case, snake_case, snake_case )
read_in_q_k_v(snake_case, snake_case )
if mlm_model or irtr_model:
__magic_name__ :int = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(snake_case, snake_case )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
__magic_name__ , __magic_name__ :Union[str, Any] = model.load_state_dict(snake_case, strict=snake_case )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(snake_case )
# Define processor
__magic_name__ :Optional[int] = ViltImageProcessor(size=3_8_4 )
__magic_name__ :Tuple = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__magic_name__ :Any = ViltProcessor(snake_case, snake_case )
# Forward pass on example inputs (image + text)
if nlvr_model:
__magic_name__ :Dict = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=snake_case ).raw )
__magic_name__ :Tuple = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=snake_case ).raw )
__magic_name__ :Union[str, Any] = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
__magic_name__ :str = processor(snake_case, snake_case, return_tensors='''pt''' )
__magic_name__ :str = processor(snake_case, snake_case, return_tensors='''pt''' )
__magic_name__ :Union[str, Any] = model(
input_ids=encoding_a.input_ids, pixel_values=encoding_a.pixel_values, pixel_values_a=encoding_a.pixel_values, )
else:
__magic_name__ :Dict = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''', stream=snake_case ).raw )
if mlm_model:
__magic_name__ :int = '''a bunch of [MASK] laying on a [MASK].'''
else:
__magic_name__ :Union[str, Any] = '''How many cats are there?'''
__magic_name__ :Any = processor(snake_case, snake_case, return_tensors='''pt''' )
__magic_name__ :List[str] = model(**snake_case )
# Verify outputs
if mlm_model:
__magic_name__ :Any = torch.Size([1, 1_1, 3_0_5_2_2] )
__magic_name__ :Any = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3], snake_case, atol=1E-4 )
# verify masked token prediction equals "cats"
__magic_name__ :List[str] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
__magic_name__ :Union[str, Any] = torch.Size([1, 3_1_2_9] )
__magic_name__ :Union[str, Any] = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3], snake_case, atol=1E-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3], snake_case, atol=1E-4 )
# verify vqa prediction equals "2"
__magic_name__ :Tuple = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
__magic_name__ :List[Any] = torch.Size([1, 2] )
__magic_name__ :Optional[Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3], snake_case, atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(snake_case ).mkdir(exist_ok=snake_case )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 0 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCamelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCamelCase = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = 0.0
for i, j in zip(_UpperCAmelCase , _UpperCAmelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(_UpperCAmelCase , _UpperCAmelCase ) else 0.0
UpperCAmelCase_ = n_correct / len(_UpperCAmelCase )
return {
"accuracy": accuracy,
}
| 82 | 0 |
__snake_case = '''
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__snake_case = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__snake_case = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 1 |
"""simple docstring"""
lowerCamelCase = """Alexander Joslin"""
import operator as op
from .stack import Stack
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
UpperCAmelCase_ = Stack()
UpperCAmelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowerCAmelCase__ ) )
elif i in operators:
# RULE 2
operator_stack.push(lowerCAmelCase__ )
elif i == ")":
# RULE 4
UpperCAmelCase_ = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operators[opr](lowerCAmelCase__ , lowerCAmelCase__ )
operand_stack.push(lowerCAmelCase__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( _snake_case :Tuple , _snake_case :List[Any] , _snake_case :Optional[Any] ) -> List[str]:
_A = UniSpeechSatForSequenceClassification.from_pretrained(_snake_case , config=_snake_case )
_A = downstream_dict['''projector.weight''']
_A = downstream_dict['''projector.bias''']
_A = downstream_dict['''model.post_net.linear.weight''']
_A = downstream_dict['''model.post_net.linear.bias''']
return model
def SCREAMING_SNAKE_CASE_ ( _snake_case :Dict , _snake_case :Union[str, Any] , _snake_case :List[str] ) -> str:
_A = UniSpeechSatForAudioFrameClassification.from_pretrained(_snake_case , config=_snake_case )
_A = downstream_dict['''model.linear.weight''']
_A = downstream_dict['''model.linear.bias''']
return model
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[Any] , _snake_case :str , _snake_case :str ) -> List[str]:
_A = UniSpeechSatForXVector.from_pretrained(_snake_case , config=_snake_case )
_A = downstream_dict['''connector.weight''']
_A = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_A = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_A = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_A = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
_A = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
_A = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
_A = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
_A = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any , _snake_case :List[str] , _snake_case :Union[str, Any] , _snake_case :List[str] ) -> Any:
_A = torch.load(_snake_case , map_location='''cpu''' )
_A = checkpoint['''Downstream''']
_A = UniSpeechSatConfig.from_pretrained(_snake_case )
_A = WavaVecaFeatureExtractor.from_pretrained(
_snake_case , return_attention_mask=_snake_case , do_normalize=_snake_case )
_A = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
_A = convert_classification(_snake_case , _snake_case , _snake_case )
elif arch.endswith('''ForAudioFrameClassification''' ):
_A = convert_diarization(_snake_case , _snake_case , _snake_case )
elif arch.endswith('''ForXVector''' ):
_A = convert_xvector(_snake_case , _snake_case , _snake_case )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_A = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(_snake_case )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
UpperCAmelCase_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 2 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = int(number**0.5 )
return number == sq * sq
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCAmelCase_ = x_den * y_den * z_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
top //= hcf
bottom //= hcf
return top, bottom
def a__ ( lowerCAmelCase__ = 35 ):
UpperCAmelCase_ = set()
UpperCAmelCase_ = 42
UpperCAmelCase_ = Fraction(0 )
UpperCAmelCase_ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCAmelCase_ = x_num * y_den + x_den * y_num
UpperCAmelCase_ = x_den * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCAmelCase_ = x_den * x_den * y_den * y_den
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=-1
UpperCAmelCase_ = x_num * y_num
UpperCAmelCase_ = x_den * y_num + x_num * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = x_num * x_num * y_num * y_num
UpperCAmelCase_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
for num, den in unique_s:
total += Fraction(lowerCAmelCase__ , lowerCAmelCase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
lowerCAmelCase : Dict = TypeVar('T')
class SCREAMING_SNAKE_CASE__ ( Generic[T]):
def __init__( self , A_ )-> None:
'''simple docstring'''
UpperCamelCase = data
UpperCamelCase = self
UpperCamelCase = 0
class SCREAMING_SNAKE_CASE__ ( Generic[T]):
def __init__( self )-> None:
'''simple docstring'''
UpperCamelCase = {}
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
UpperCamelCase = DisjointSetTreeNode(A_ )
def UpperCAmelCase_ ( self , A_ )-> DisjointSetTreeNode[T]:
'''simple docstring'''
UpperCamelCase = self.map[data]
if elem_ref != elem_ref.parent:
UpperCamelCase = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCAmelCase_ ( self , A_ , A_ )-> None:
'''simple docstring'''
if nodea.rank > nodea.rank:
UpperCamelCase = nodea
else:
UpperCamelCase = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCAmelCase_ ( self , A_ , A_ )-> None:
'''simple docstring'''
self.link(self.find_set(A_ ) , self.find_set(A_ ) )
class SCREAMING_SNAKE_CASE__ ( Generic[T]):
def __init__( self )-> None:
'''simple docstring'''
UpperCamelCase = {}
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
if node not in self.connections:
UpperCamelCase = {}
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> None:
'''simple docstring'''
self.add_node(A_ )
self.add_node(A_ )
UpperCamelCase = weight
UpperCamelCase = weight
def UpperCAmelCase_ ( self )-> GraphUndirectedWeighted[T]:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda A_ : x[2] )
# creating the disjoint set
UpperCamelCase = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(A_ )
# MST generation
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCamelCase , UpperCamelCase , UpperCamelCase = edges[index]
index += 1
UpperCamelCase = disjoint_set.find_set(A_ )
UpperCamelCase = disjoint_set.find_set(A_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(A_ , A_ , A_ )
disjoint_set.union(A_ , A_ )
return graph
| 3 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a :
def __init__( self , _snake_case , _snake_case=99 , _snake_case=13 , _snake_case=7 , _snake_case=9 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case=8 , _snake_case=0.1 , _snake_case=0.002 , _snake_case=1 , _snake_case=0 , _snake_case=0 , _snake_case=None , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = encoder_seq_length
lowerCAmelCase = decoder_seq_length
# For common tests
lowerCAmelCase = self.decoder_seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_attention_mask
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = d_ff
lowerCAmelCase = relative_attention_num_buckets
lowerCAmelCase = dropout_rate
lowerCAmelCase = initializer_factor
lowerCAmelCase = eos_token_id
lowerCAmelCase = pad_token_id
lowerCAmelCase = decoder_start_token_id
lowerCAmelCase = None
lowerCAmelCase = decoder_layers
def UpperCamelCase__ ( self ):
"""simple docstring"""
return TaConfig.from_pretrained('google/umt5-base' )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , ):
"""simple docstring"""
if attention_mask is None:
lowerCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_snake_case )
if decoder_head_mask is None:
lowerCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_snake_case )
if cross_attn_head_mask is None:
lowerCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=_snake_case )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase = self.get_config()
lowerCAmelCase = config.num_attention_heads
lowerCAmelCase = self.prepare_inputs_dict(_snake_case , _snake_case , _snake_case )
return config, input_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = UMTaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(
input_ids=_snake_case , decoder_input_ids=_snake_case , attention_mask=_snake_case , decoder_attention_mask=_snake_case , )
lowerCAmelCase = model(input_ids=_snake_case , decoder_input_ids=_snake_case )
lowerCAmelCase = result.last_hidden_state
lowerCAmelCase = result.past_key_values
lowerCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_snake_case ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = UMTaModel(config=_snake_case ).get_decoder().to(_snake_case ).eval()
# first forward pass
lowerCAmelCase = model(_snake_case , use_cache=_snake_case )
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case , use_cache=_snake_case )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) + 1 )
lowerCAmelCase ,lowerCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = model(_snake_case )['last_hidden_state']
lowerCAmelCase = model(_snake_case , past_key_values=_snake_case )['last_hidden_state']
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = UMTaModel(config=_snake_case ).to(_snake_case ).half().eval()
lowerCAmelCase = model(**_snake_case )['last_hidden_state']
self.parent.assertFalse(torch.isnan(_snake_case ).any().item() )
@require_torch
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
snake_case__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
snake_case__ = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
snake_case__ = True
snake_case__ = False
snake_case__ = False
snake_case__ = True
snake_case__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
snake_case__ = [0.8, 0.9]
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = UMTaModel(config_and_inputs[0] ).to(_snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=_snake_case , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = config_and_inputs[0]
lowerCAmelCase = UMTaForConditionalGeneration(_snake_case ).eval()
model.to(_snake_case )
lowerCAmelCase = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=_snake_case ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_snake_case ),
}
for attn_name, (name, mask) in zip(_snake_case , head_masking.items() ):
lowerCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=_snake_case )
lowerCAmelCase = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=_snake_case , return_dict_in_generate=_snake_case , **_snake_case , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=_snake_case ).to(_snake_case )
lowerCAmelCase = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=_snake_case , legacy=_snake_case )
lowerCAmelCase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
lowerCAmelCase = tokenizer(_snake_case , return_tensors='pt' , padding=_snake_case ).input_ids
# fmt: off
lowerCAmelCase = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(_snake_case , _snake_case )
lowerCAmelCase = model.generate(input_ids.to(_snake_case ) )
lowerCAmelCase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
lowerCAmelCase = tokenizer.batch_decode(_snake_case )
self.assertEqual(_snake_case , _snake_case )
| 4 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''vit'''
def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : int=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=16 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = encoder_stride
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 1e-4
| 82 | 0 |
'''simple docstring'''
def A (__lowerCamelCase :int ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
_lowerCAmelCase = f'Input value of [number={number}] must be an integer'
raise TypeError(__lowerCamelCase )
if number < 0:
return False
_lowerCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : int=30 , _UpperCAmelCase : Tuple=400 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = size if size is not None else {"height": 20, "width": 20}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_convert_rgb
UpperCAmelCase_ = [512, 1024, 2048, 4096]
UpperCAmelCase_ = patch_size if patch_size is not None else {"height": 16, "width": 16}
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase_ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self )
@property
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ = 2048
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase_ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCAmelCase ):
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
UpperCAmelCase_ = "Hello"
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase_ = 3
@property
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 82 | 0 |
_lowerCamelCase = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_lowerCamelCase = [{'type': 'code', 'content': INSTALL_CONTENT}]
_lowerCamelCase = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
} | 6 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCAmelCase_ = BitConfig(
conv_layer=lowerCAmelCase__ , num_labels=1000 , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , )
return config
def a__ ( lowerCAmelCase__ ):
if "stem.conv" in name:
UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
UpperCAmelCase_ = "bit." + name
if "bit" not in name and "classifier" not in name:
UpperCAmelCase_ = "bit.encoder." + name
return name
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ = get_config(lowerCAmelCase__ )
# load original model from timm
UpperCAmelCase_ = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
# load state_dict of original model
UpperCAmelCase_ = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val.squeeze() if "head" in key else val
# load HuggingFace model
UpperCAmelCase_ = BitForImageClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(lowerCAmelCase__ ).unsqueeze(0 )
UpperCAmelCase_ = processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(lowerCAmelCase__ )
UpperCAmelCase_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCAmelCase_ = timm_model(lowerCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowerCamelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 | 0 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _snake_case ( _snake_case : int = 3 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
    if isinstance(_snake_case , str ):
        raise TypeError('number of qubits must be an integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(_snake_case ) != number_of_qubits:
        raise ValueError('number of qubits must be an exact integer.' )
if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate (>10).' )
_A = QuantumRegister(_snake_case , 'qr' )
_A = ClassicalRegister(_snake_case , 'cr' )
_A = QuantumCircuit(_snake_case , _snake_case )
_A = number_of_qubits
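    # for each qubit: apply a Hadamard, then controlled-phase rotations of decreasing angle against the remaining qubits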
for i in range(_snake_case ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_snake_case ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_snake_case , _snake_case )
# simulate with 10000 shots
_A = Aer.get_backend('qasm_simulator' )
_A = execute(_snake_case , _snake_case , shots=1_00_00 )
return job.result().get_counts(_snake_case )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 7 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
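    # fractional knapsack: sort items by value-to-weight ratio, accumulate weights, then bisect for the item where capacity w runs out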
UpperCAmelCase_ = sorted(zip(lowerCAmelCase__ , lowerCAmelCase__ ) , key=lambda lowerCAmelCase__ : x[0] / x[1] , reverse=lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = [i[0] for i in r], [i[1] for i in r]
UpperCAmelCase_ = list(accumulate(lowerCAmelCase__ ) )
UpperCAmelCase_ = bisect(lowerCAmelCase__ , lowerCAmelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int = 10 ) -> str:
if not isinstance(__snake_case , __snake_case ) or n < 0:
raise ValueError('Invalid input' )
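    # last n digits of 28433 * 2**7830457 + 1, computed with three-argument pow for modular exponentiation (cf. Project Euler 97)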
__A : Optional[Any] = 10**n
__A : List[str] = 2_84_33 * (pow(2 , 7_83_04_57 , __snake_case )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""") | 8 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCamelCase = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
lowerCamelCase = None
def a__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=lowerCAmelCase__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=lowerCAmelCase__ , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = bool(qa["answers"]["text"] )
return qid_to_has_ans
def a__ ( lowerCAmelCase__ ):
def remove_articles(lowerCAmelCase__ ):
return ARTICLES_REGEX.sub(" " , lowerCAmelCase__ )
def white_space_fix(lowerCAmelCase__ ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase__ ):
UpperCAmelCase_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase__ ) ) ) )
def a__ ( lowerCAmelCase__ ):
if not s:
return []
return normalize_answer(lowerCAmelCase__ ).split()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return int(normalize_answer(lowerCAmelCase__ ) == normalize_answer(lowerCAmelCase__ ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = collections.Counter(lowerCAmelCase__ ) & collections.Counter(lowerCAmelCase__ )
UpperCAmelCase_ = sum(common.values() )
if len(lowerCAmelCase__ ) == 0 or len(lowerCAmelCase__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = (2 * precision * recall) / (precision + recall)
return fa
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = qa["id"]
UpperCAmelCase_ = [t for t in qa["answers"]["text"] if normalize_answer(lowerCAmelCase__ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
UpperCAmelCase_ = [""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
UpperCAmelCase_ = preds[qid]
# Take max over all gold answers
UpperCAmelCase_ = max(compute_exact(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
UpperCAmelCase_ = max(compute_fa(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
return exact_scores, fa_scores
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = {}
for qid, s in scores.items():
UpperCAmelCase_ = na_probs[qid] > na_prob_thresh
if pred_na:
UpperCAmelCase_ = float(not qid_to_has_ans[qid] )
else:
UpperCAmelCase_ = s
return new_scores
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
if not qid_list:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for k in new_eval:
UpperCAmelCase_ = new_eval[k]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
plt.step(lowerCAmelCase__ , lowerCAmelCase__ , color="b" , alpha=0.2 , where="post" )
plt.fill_between(lowerCAmelCase__ , lowerCAmelCase__ , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowerCAmelCase__ )
plt.savefig(lowerCAmelCase__ )
plt.clf()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ):
UpperCAmelCase_ = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : na_probs[k] )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 1.0
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = [1.0]
UpperCAmelCase_ = [0.0]
UpperCAmelCase_ = 0.0
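    # sweep questions in order of increasing no-answer probability, updating running precision/recall to trace the PR curve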
for i, qid in enumerate(lowerCAmelCase__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
UpperCAmelCase_ = true_pos / float(i + 1 )
UpperCAmelCase_ = true_pos / float(lowerCAmelCase__ )
if i == len(lowerCAmelCase__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCAmelCase__ )
recalls.append(lowerCAmelCase__ )
if out_image:
plot_pr_curve(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return {"ap": 100.0 * avg_prec}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if out_image_dir and not os.path.exists(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
UpperCAmelCase_ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
UpperCAmelCase_ = {k: float(lowerCAmelCase__ ) for k, v in qid_to_has_ans.items()}
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_exact" )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_f1" )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_oracle" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if not qid_list:
return
UpperCAmelCase_ = [na_probs[k] for k in qid_list]
UpperCAmelCase_ = np.ones_like(lowerCAmelCase__ ) / float(len(lowerCAmelCase__ ) )
plt.hist(lowerCAmelCase__ , weights=lowerCAmelCase__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(lowerCAmelCase__ , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCAmelCase_ = num_no_ans
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : na_probs[k] )
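    # start from predicting no-answer for every question, then sweep candidate thresholds in order of increasing no-answer probability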
for i, qid in enumerate(lowerCAmelCase__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCAmelCase_ = scores[qid]
else:
if preds[qid]:
UpperCAmelCase_ = -1
else:
UpperCAmelCase_ = 0
cur_score += diff
if cur_score > best_score:
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = na_probs[qid]
return 100.0 * best_score / len(lowerCAmelCase__ ), best_thresh
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = best_exact
UpperCAmelCase_ = exact_thresh
UpperCAmelCase_ = best_fa
UpperCAmelCase_ = fa_thresh
def a__ ( ):
with open(OPTS.data_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
UpperCAmelCase_ = dataset_json["data"]
with open(OPTS.pred_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
else:
UpperCAmelCase_ = {k: 0.0 for k in preds}
UpperCAmelCase_ = make_qid_to_has_ans(lowerCAmelCase__ ) # maps qid to True/False
UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if v]
UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if not v]
UpperCAmelCase_ , UpperCAmelCase_ = get_raw_scores(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
UpperCAmelCase_ = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ )
if has_ans_qids:
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "HasAns" )
if no_ans_qids:
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir )
histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
else:
print(json.dumps(lowerCAmelCase__ , indent=2 ) )
if __name__ == "__main__":
lowerCamelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 82 | 0 |
from __future__ import annotations
def A ( __UpperCamelCase = 4 ) -> list[list[int]]:
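    # abs() guards against negative sizes; a zero size falls back to the default 4x4 grid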
A__ = abs(__UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )]
def A ( __UpperCamelCase ) -> list[list[int]]:
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def A ( __UpperCamelCase ) -> list[list[int]]:
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def A ( __UpperCamelCase ) -> list[list[int]]:
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def A ( __UpperCamelCase ) -> list[list[int]]:
A__ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )]
return matrix
def A ( __UpperCamelCase ) -> list[list[int]]:
A__ = matrix[::-1]
return matrix
def A ( __UpperCamelCase ) -> list[list[int]]:
A__ = [x[::-1] for x in matrix]
return matrix
def A ( __UpperCamelCase ) -> None:
for i in matrix:
print(*__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 90 counterclockwise:\n''')
print_matrix(rotate_aa(matrix))
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 180:\n''')
print_matrix(rotate_aaa(matrix))
SCREAMING_SNAKE_CASE__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 270 counterclockwise:\n''')
print_matrix(rotate_aaa(matrix))
| 9 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float(moles / volume ) * nfactor )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any , _A : str ):
_UpperCamelCase = 3
_UpperCamelCase = 250
_UpperCamelCase = ids_tensor((batch_size, length) , _A )
_UpperCamelCase = torch.ones((batch_size, length) , device=_A , dtype=torch.float ) / length
return input_ids, scores
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase , _UpperCamelCase = self._get_tensors(5 )
_UpperCamelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = MaxLengthCriteria(max_length=10 )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(5 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(5 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase , _UpperCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
_UpperCamelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase , _UpperCamelCase = self._get_tensors(5 )
_UpperCamelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(_A , _A ) )
_UpperCamelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(_A , _A ) )
def UpperCamelCase_ ( self : Any ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(_A ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
_UpperCamelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(_A ) , 1 )
| 10 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
lowerCamelCase = 6_378_137.0
lowerCamelCase = 6_356_752.314_245
lowerCamelCase = 6_378_137
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
UpperCAmelCase_ = haversine_distance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
UpperCAmelCase_ = (b_lata + b_lata) / 2
UpperCAmelCase_ = (b_lata - b_lata) / 2
# Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2P cos^2Q / cos^2(sigma/2)
UpperCAmelCase_ = (sin(lowerCAmelCase__ ) ** 2) * (cos(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = cos(sigma / 2 ) ** 2
UpperCAmelCase_ = (sigma - sin(lowerCAmelCase__ )) * (x_numerator / x_demonimator)
# Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2P sin^2Q / sin^2(sigma/2)
UpperCAmelCase_ = (cos(lowerCAmelCase__ ) ** 2) * (sin(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = sin(sigma / 2 ) ** 2
UpperCAmelCase_ = (sigma + sin(lowerCAmelCase__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
'''simple docstring'''
def lowerCAmelCase (__A , __A):
"""simple docstring"""
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def lowerCAmelCase (__A , __A=0):
"""simple docstring"""
return sorted(__A , key=lambda __A: x[column])
def lowerCAmelCase (__A , __A , __A=float('''inf''')):
"""simple docstring"""
for i in range(points_counts - 1):
for j in range(i + 1 , __A):
_a = euclidean_distance_sqr(points[i] , points[j])
if current_dis < min_dis:
_a = current_dis
return min_dis
def lowerCAmelCase (__A , __A , __A=float('''inf''')):
"""simple docstring"""
for i in range(min(6 , points_counts - 1) , __A):
for j in range(max(0 , i - 6) , __A):
_a = euclidean_distance_sqr(points[i] , points[j])
if current_dis < min_dis:
_a = current_dis
return min_dis
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
if points_counts <= 3:
return dis_between_closest_pair(__A , __A)
# recursion
_a = points_counts // 2
_a = closest_pair_of_points_sqr(
__A , points_sorted_on_y[:mid] , __A)
_a = closest_pair_of_points_sqr(
__A , points_sorted_on_y[mid:] , points_counts - mid)
_a = min(__A , __A)
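    # build the strip: points whose horizontal offset from the dividing point is below the best distance found so far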
_a = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
cross_strip.append(__A)
_a = dis_between_closest_in_strip(
__A , len(__A) , __A)
return min(__A , __A)
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = column_based_sort(__A , column=0)
_a = column_based_sort(__A , column=1)
return (
closest_pair_of_points_sqr(
__A , __A , __A)
) ** 0.5
if __name__ == "__main__":
lowercase_ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 11 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Dict=36 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[Any]=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = 300
return config
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = self.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase__ ( self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = MraModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> int:
'''simple docstring'''
UpperCAmelCase_ = True
UpperCAmelCase_ = MraModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = MraForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = ()
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MraModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MraModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip(reason="MRA does not output attentions" )
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
UpperCAmelCase_ = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 82 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : List[str] = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : int = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 12 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase = 50_000
lowerCamelCase = 5_000
lowerCamelCase , lowerCamelCase = os.path.split(__file__)
lowerCamelCase = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
def a__ ( ):
UpperCAmelCase_ = {"num examples": SPEED_TEST_N_EXAMPLES}
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
UpperCAmelCase_ = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
UpperCAmelCase_ = generate_example_dataset(
os.path.join(lowerCAmelCase__ , "dataset.arrow" ) , lowerCAmelCase__ , num_examples=lowerCAmelCase__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(lowerCAmelCase__ , **lowerCAmelCase__ )
print("shuffling dataset" )
UpperCAmelCase_ = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(
lowerCAmelCase__ , **lowerCAmelCase__ )
with open(lowerCAmelCase__ , "wb" ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 82 | 0 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Dict:
for param in module.parameters():
__lowerCamelCase : Dict = False
def UpperCAmelCase__ ( ) -> Optional[Any]:
__lowerCamelCase : List[Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__lowerCamelCase : str = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def UpperCAmelCase__ ( UpperCAmelCase_ : int ) -> List[str]:
__lowerCamelCase : str = plt.imshow(UpperCAmelCase_ )
fig.axes.get_xaxis().set_visible(UpperCAmelCase_ )
fig.axes.get_yaxis().set_visible(UpperCAmelCase_ )
plt.show()
def UpperCAmelCase__ ( ) -> List[Any]:
__lowerCamelCase : str = datetime.now()
__lowerCamelCase : Optional[Any] = current_time.strftime('%H:%M:%S' )
return timestamp
| 13 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''image''': Image()} )
UpperCamelCase = Features({'''labels''': ClassLabel} )
UpperCamelCase = "image"
UpperCamelCase = "labels"
def lowercase__ ( self : str , _UpperCAmelCase : str ) -> Dict:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , _UpperCAmelCase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
UpperCAmelCase_ = copy.deepcopy(self )
UpperCAmelCase_ = self.label_schema.copy()
UpperCAmelCase_ = features[self.label_column]
UpperCAmelCase_ = label_schema
return task_template
@property
def lowercase__ ( self : List[str] ) -> Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 82 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , _a , _a ) -> Optional[Any]:
super().__init__()
# make sure scheduler can always be converted to DDIM
_a : List[str] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = 1 , _a = None , _a = 0.0 , _a = 5_0 , _a = None , _a = "pil" , _a = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , _a ):
_a : str = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_a : Optional[int] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_a )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_a : int = randn_tensor(_a , generator=_a , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_a )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_a : Any = self.unet(_a , _a ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_a : int = self.scheduler.step(
_a , _a , _a , eta=_a , use_clipped_model_output=_a , generator=_a ).prev_sample
_a : int = (image / 2 + 0.5).clamp(0 , 1 )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a : List[str] = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 14 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = generator.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 82 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
A : Optional[Any] = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def UpperCamelCase ( __magic_name__ : int , __magic_name__ : Optional[int]=None ) -> Optional[int]:
"""simple docstring"""
require_version(deps[pkg] , __magic_name__ )
| 15 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return x if y == 0 else greatest_common_divisor(lowerCAmelCase__ , x % y )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return (x * y) // greatest_common_divisor(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ = 20 ):
UpperCAmelCase_ = 1
for i in range(1 , n + 1 ):
UpperCAmelCase_ = lcm(lowerCAmelCase__ , lowerCAmelCase__ )
return g
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = CanineTokenizer
lowerCamelCase__ = False
def _snake_case ( self : Tuple ):
super().setUp()
SCREAMING_SNAKE_CASE = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _snake_case ( self : str ):
return CanineTokenizer.from_pretrained("google/canine-s" )
def _snake_case ( self : Optional[int] , **__lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = 1024
return tokenizer
@require_torch
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = self.canine_tokenizer
SCREAMING_SNAKE_CASE = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
SCREAMING_SNAKE_CASE = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
# fmt: on
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.canine_tokenizer
SCREAMING_SNAKE_CASE = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = self.canine_tokenizer
SCREAMING_SNAKE_CASE = [
"What's the weater?",
"It's about 25 degrees.",
]
SCREAMING_SNAKE_CASE = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _snake_case ( self : List[Any] ):
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = " He is very happy, UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = " He is very happy, UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
SCREAMING_SNAKE_CASE = chr(0xE_007 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
SCREAMING_SNAKE_CASE = 0xE_005
SCREAMING_SNAKE_CASE = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
SCREAMING_SNAKE_CASE = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE = chr(0xE_005 )
SCREAMING_SNAKE_CASE = chr(0xE_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
SCREAMING_SNAKE_CASE = 0xE_006
SCREAMING_SNAKE_CASE = chr(__lowerCamelCase )
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
SCREAMING_SNAKE_CASE = 0xE_006
SCREAMING_SNAKE_CASE = chr(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [new_token_a]
SCREAMING_SNAKE_CASE = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
SCREAMING_SNAKE_CASE = 0xE_007
SCREAMING_SNAKE_CASE = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE = "hello world"
if self.space_between_special_tokens:
SCREAMING_SNAKE_CASE = "[CLS] hello world [SEP]"
else:
SCREAMING_SNAKE_CASE = input
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
SCREAMING_SNAKE_CASE = "a"
SCREAMING_SNAKE_CASE = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
SCREAMING_SNAKE_CASE = 0xE_006
SCREAMING_SNAKE_CASE = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Any ):
pass
def _snake_case ( self : Optional[Any] ):
pass
def _snake_case ( self : Tuple ):
pass
def _snake_case ( self : Tuple ):
pass
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Any ):
pass | 16 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCAmelCase_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
else:
UpperCAmelCase_ = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = ProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
UpperCAmelCase_ = ["key_proj", "value_proj", "query_proj"]
UpperCAmelCase_ = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
UpperCAmelCase_ = key.split("." )
if attributes[0] == "lm_head":
UpperCAmelCase_ = prophet
UpperCAmelCase_ = prophet_old
else:
UpperCAmelCase_ = prophet.prophetnet
UpperCAmelCase_ = prophet_old.model
UpperCAmelCase_ = False
for attribute in attributes:
if attribute in mapping:
UpperCAmelCase_ = mapping[attribute]
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = attribute
elif hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
UpperCAmelCase_ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.bias
logger.info(f"""{attribute} is initialized""" )
UpperCAmelCase_ = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase__ , "in_proj_weight" ):
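# the old attention layers store the query/key/value projections concatenated in a single in_proj_weight / in_proj_bias, so each projection occupies one third of the rows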
UpperCAmelCase_ = old_model.in_proj_weight.shape[0] // 3
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
UpperCAmelCase_ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
UpperCAmelCase_ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
UpperCAmelCase_ = True
break
if attribute.isdigit():
UpperCAmelCase_ = model[int(lowerCAmelCase__ )]
UpperCAmelCase_ = old_model[int(lowerCAmelCase__ )]
else:
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if old_attribute == "":
UpperCAmelCase_ = old_model
else:
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 82 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : Union[str, Any] = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = ['''YolosFeatureExtractor''']
UpperCAmelCase_ : Tuple = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(lowerCAmelCase__ )
for i in range(n - 1 ):
for j in range(i + 1 , lowerCAmelCase__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def a__ ( lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) <= 1:
return arr, 0
UpperCAmelCase_ = len(lowerCAmelCase__ ) // 2
UpperCAmelCase_ = arr[0:mid]
UpperCAmelCase_ = arr[mid:]
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = _count_cross_inversions(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = []
UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = 0
while i < len(lowerCAmelCase__ ) and j < len(lowerCAmelCase__ ):
if p[i] > q[j]:
# if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
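# e.g. with p = [5, 7] and q = [3, 9]: since 5 > 3, both 5 and 7 form inversions with 3, so len(p) - i = 2 is added in a single step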
num_inversion += len(lowerCAmelCase__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowerCAmelCase__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def a__ ( ):
UpperCAmelCase_ = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , lowerCAmelCase__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowerCAmelCase__ )
# an empty list should also have zero inversions
UpperCAmelCase_ = []
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 82 | 0 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase = checkpoint
_lowerCAmelCase = {}
_lowerCAmelCase = vae_state_dict["encoder.conv_in.weight"]
_lowerCAmelCase = vae_state_dict["encoder.conv_in.bias"]
_lowerCAmelCase = vae_state_dict["encoder.conv_out.weight"]
_lowerCAmelCase = vae_state_dict["encoder.conv_out.bias"]
_lowerCAmelCase = vae_state_dict["encoder.norm_out.weight"]
_lowerCAmelCase = vae_state_dict["encoder.norm_out.bias"]
_lowerCAmelCase = vae_state_dict["decoder.conv_in.weight"]
_lowerCAmelCase = vae_state_dict["decoder.conv_in.bias"]
_lowerCAmelCase = vae_state_dict["decoder.conv_out.weight"]
_lowerCAmelCase = vae_state_dict["decoder.conv_out.bias"]
_lowerCAmelCase = vae_state_dict["decoder.norm_out.weight"]
_lowerCAmelCase = vae_state_dict["decoder.norm_out.bias"]
_lowerCAmelCase = vae_state_dict["quant_conv.weight"]
_lowerCAmelCase = vae_state_dict["quant_conv.bias"]
_lowerCAmelCase = vae_state_dict["post_quant_conv.weight"]
_lowerCAmelCase = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
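# e.g. "encoder.down.0.block.0.norm1.weight" maps to the prefix "encoder.down.0", so the number of distinct prefixes equals the number of down blocks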
_lowerCAmelCase = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
_lowerCAmelCase = {
layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
# Retrieves the keys for the decoder up blocks only
_lowerCAmelCase = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
_lowerCAmelCase = {
layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(SCREAMING_SNAKE_CASE_ )
}
for i in range(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key]
if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_lowerCAmelCase = vae_state_dict.pop(
F'''encoder.down.{i}.downsample.conv.weight''' )
_lowerCAmelCase = vae_state_dict.pop(
F'''encoder.down.{i}.downsample.conv.bias''' )
_lowerCAmelCase = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = {"old": F'''down.{i}.block''', "new": F'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = [key for key in vae_state_dict if "encoder.mid.block" in key]
_lowerCAmelCase = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_lowerCAmelCase = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key]
_lowerCAmelCase = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = [key for key in vae_state_dict if "encoder.mid.attn" in key]
_lowerCAmelCase = renew_vae_attention_paths(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ )
conv_attn_to_linear(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = num_up_blocks - 1 - i
_lowerCAmelCase = [
key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key
]
if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_lowerCAmelCase = vae_state_dict[
F'''decoder.up.{block_id}.upsample.conv.weight'''
]
_lowerCAmelCase = vae_state_dict[
F'''decoder.up.{block_id}.upsample.conv.bias'''
]
_lowerCAmelCase = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = {"old": F'''up.{block_id}.block''', "new": F'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = [key for key in vae_state_dict if "decoder.mid.block" in key]
_lowerCAmelCase = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_lowerCAmelCase = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key]
_lowerCAmelCase = renew_vae_resnet_paths(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = {"old": F'''mid.block_{i}''', "new": F'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = [key for key in vae_state_dict if "decoder.mid.attn" in key]
_lowerCAmelCase = renew_vae_attention_paths(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ )
conv_attn_to_linear(SCREAMING_SNAKE_CASE_ )
return new_checkpoint
def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , ):
'''simple docstring'''
_lowerCAmelCase = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
_lowerCAmelCase = io.BytesIO(r.content )
_lowerCAmelCase = OmegaConf.load(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = 512
_lowerCAmelCase = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
_lowerCAmelCase = {}
with safe_open(SCREAMING_SNAKE_CASE_ , framework="pt" , device="cpu" ) as f:
for key in f.keys():
_lowerCAmelCase = f.get_tensor(SCREAMING_SNAKE_CASE_ )
else:
_lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location=SCREAMING_SNAKE_CASE_ )["state_dict"]
# Convert the VAE model.
_lowerCAmelCase = create_vae_diffusers_config(SCREAMING_SNAKE_CASE_ , image_size=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = custom_convert_ldm_vae_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = AutoencoderKL(**SCREAMING_SNAKE_CASE_ )
vae.load_state_dict(SCREAMING_SNAKE_CASE_ )
vae.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 18 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
if number > 0:
raise ValueError("input must be a negative integer" )
UpperCAmelCase_ = len(bin(lowerCAmelCase__ )[3:] )
UpperCAmelCase_ = bin(abs(lowerCAmelCase__ ) - (1 << binary_number_length) )[3:]
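# e.g. for -5: the magnitude '101' has length 3, 5 - (1 << 3) = -3 gives '11', which is padded to '1011' and returned as '0b1011'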
UpperCAmelCase_ = (
(
"1"
+ "0" * (binary_number_length - len(lowerCAmelCase__ ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case = 1, __snake_case = 1, __snake_case = 1.0e4, __snake_case = False, __snake_case = 1.0, ) -> jnp.ndarray:
"""simple docstring"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F'''Embedding dimension {embedding_dim} should be even'''
_UpperCamelCase = float(embedding_dim // 2 )
_UpperCamelCase = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
_UpperCamelCase = min_timescale * jnp.exp(jnp.arange(__snake_case, dtype=jnp.floataa ) * -log_timescale_increment )
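# per-dimension frequencies form a geometric progression controlled by min_timescale / max_timescale; the outer product below gives one phase angle per (timestep, frequency) pair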
_UpperCamelCase = jnp.expand_dims(__snake_case, 1 ) * jnp.expand_dims(__snake_case, 0 )
# scale embeddings
_UpperCamelCase = scale * emb
if flip_sin_to_cos:
_UpperCamelCase = jnp.concatenate([jnp.cos(__snake_case ), jnp.sin(__snake_case )], axis=1 )
else:
_UpperCamelCase = jnp.concatenate([jnp.sin(__snake_case ), jnp.cos(__snake_case )], axis=1 )
_UpperCamelCase = jnp.reshape(__snake_case, [jnp.shape(__snake_case )[0], embedding_dim] )
return signal
class _UpperCAmelCase( nn.Module ):
lowercase__ = 32
lowercase__ = jnp.floataa
@nn.compact
def __call__( self , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''')(__a)
_UpperCamelCase = nn.silu(__a)
_UpperCamelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''')(__a)
return temb
class _UpperCAmelCase( nn.Module ):
lowercase__ = 32
lowercase__ = False
lowercase__ = 1
@nn.compact
def __call__( self , __a) -> Optional[Any]:
'''simple docstring'''
return get_sinusoidal_embeddings(
__a , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
| 19 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] , **_UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , "vision" )
self.check_model_type(_UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , _UpperCAmelCase : Union[str, List[str]] = None , **_UpperCAmelCase : Optional[int] , ) -> List[Any]:
'''simple docstring'''
if "text_queries" in kwargs:
UpperCAmelCase_ = kwargs.pop("text_queries" )
if isinstance(_UpperCAmelCase , (str, Image.Image) ):
UpperCAmelCase_ = {"image": image, "candidate_labels": candidate_labels}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
return results
def lowercase__ ( self : str , **_UpperCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {}
if "threshold" in kwargs:
UpperCAmelCase_ = kwargs["threshold"]
if "top_k" in kwargs:
UpperCAmelCase_ = kwargs["top_k"]
return {}, {}, postprocess_params
def lowercase__ ( self : int , _UpperCAmelCase : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = load_image(inputs["image"] )
UpperCAmelCase_ = inputs["candidate_labels"]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase_ = candidate_labels.split("," )
UpperCAmelCase_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
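# keep the original (height, width) so post-processing can rescale the predicted boxes back to the input image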
for i, candidate_label in enumerate(_UpperCAmelCase ):
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.image_processor(_UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(_UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowercase__ ( self : int , _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = model_inputs.pop("target_size" )
UpperCAmelCase_ = model_inputs.pop("candidate_label" )
UpperCAmelCase_ = model_inputs.pop("is_last" )
UpperCAmelCase_ = self.model(**_UpperCAmelCase )
UpperCAmelCase_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def lowercase__ ( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : List[str]=None ) -> int:
'''simple docstring'''
UpperCAmelCase_ = []
for model_output in model_outputs:
UpperCAmelCase_ = model_output["candidate_label"]
UpperCAmelCase_ = BaseModelOutput(_UpperCAmelCase )
UpperCAmelCase_ = self.image_processor.post_process_object_detection(
outputs=_UpperCAmelCase , threshold=_UpperCAmelCase , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
UpperCAmelCase_ = outputs["scores"][index].item()
UpperCAmelCase_ = self._get_bounding_box(outputs["boxes"][index][0] )
UpperCAmelCase_ = {"score": score, "label": label, "box": box}
results.append(_UpperCAmelCase )
UpperCAmelCase_ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )
if top_k:
UpperCAmelCase_ = results[:top_k]
return results
def lowercase__ ( self : str , _UpperCAmelCase : "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = box.int().tolist()
UpperCAmelCase_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 82 | 0 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
_lowerCAmelCase: List[Any] = trt.Logger(trt.Logger.WARNING)
_lowerCAmelCase: Any = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
_lowerCAmelCase: int = logging.getLogger(__name__)
_lowerCAmelCase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='Number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
_lowerCAmelCase: Any = parser.parse_args()
if args.tokenizer_name:
_lowerCAmelCase: Any = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
_lowerCAmelCase: Dict = args.per_device_eval_batch_size
_lowerCAmelCase: str = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
_lowerCAmelCase: Optional[int] = True
_lowerCAmelCase: Optional[int] = 'temp_engine/bert-fp32.engine'
if args.fpaa:
_lowerCAmelCase: Tuple = 'temp_engine/bert-fp16.engine'
if args.inta:
_lowerCAmelCase: Optional[Any] = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
_lowerCAmelCase: int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
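# the ONNX parser requires a network created with the explicit-batch flag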
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
_lowerCAmelCase: Optional[Any] = [network.get_input(i) for i in range(network.num_inputs)]
_lowerCAmelCase: Optional[int] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
_lowerCAmelCase: Union[str, Any] = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
_lowerCAmelCase: Union[str, Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
_lowerCAmelCase: str = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _lowercase( __a : Tuple , __a : Optional[int] , __a : Dict , __a : Optional[Any] , __a : Tuple , __a : Union[str, Any] , __a : int , __a : Any ):
a__ =np.asarray(inputs['input_ids'] , dtype=np.intaa )
a__ =np.asarray(inputs['attention_mask'] , dtype=np.intaa )
a__ =np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __a )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __a )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __a )
# start time
a__ =time.time()
# Run inference
context.execute_async(
bindings=[int(__a ) for d_inp in d_inputs] + [int(__a ), int(__a )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(__a , __a , __a )
cuda.memcpy_dtoh_async(__a , __a , __a )
# Synchronize the stream and take time
stream.synchronize()
# end time
a__ =time.time()
a__ =end_time - start_time
a__ =(h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
_lowerCAmelCase: Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowerCAmelCase: str = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
_lowerCAmelCase: Tuple = raw_datasets['validation'].column_names
_lowerCAmelCase: List[str] = 'question' if 'question' in column_names else column_names[0]
_lowerCAmelCase: Union[str, Any] = 'context' if 'context' in column_names else column_names[1]
_lowerCAmelCase: Union[str, Any] = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
_lowerCAmelCase: Optional[Any] = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
_lowerCAmelCase: Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def _lowercase( __a : List[str] ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
a__ =[q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
a__ =tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=__a , stride=args.doc_stride , return_overflowing_tokens=__a , return_offsets_mapping=__a , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
a__ =tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
a__ =[]
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
a__ =tokenized_examples.sequence_ids(__a )
a__ =1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
a__ =sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
a__ =[
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
_lowerCAmelCase: List[Any] = raw_datasets['validation']
# Validation Feature Creation
_lowerCAmelCase: Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
_lowerCAmelCase: List[str] = default_data_collator
_lowerCAmelCase: Dict = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
_lowerCAmelCase: Tuple = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _lowercase( __a : int , __a : Optional[int] , __a : List[Any] , __a : Optional[Any]="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
a__ =postprocess_qa_predictions(
examples=__a , features=__a , predictions=__a , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__a , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
a__ =[
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
a__ =[{'id': k, 'prediction_text': v} for k, v in predictions.items()]
a__ =[{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=__a , label_ids=__a )
_lowerCAmelCase: List[str] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _lowercase( __a : Tuple ):
return trt.volume(engine.get_binding_shape(__a ) ) * engine.get_binding_dtype(__a ).itemsize
# Allocate device memory for inputs and outputs.
_lowerCAmelCase: Optional[Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
_lowerCAmelCase: Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
_lowerCAmelCase: Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
_lowerCAmelCase: Tuple = cuda.mem_alloc(h_outputa.nbytes)
_lowerCAmelCase: Any = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
_lowerCAmelCase: str = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
_lowerCAmelCase: Any = 0.0
_lowerCAmelCase: str = 0
_lowerCAmelCase: Dict = timeit.default_timer()
_lowerCAmelCase: str = None
for step, batch in enumerate(eval_dataloader):
_lowerCAmelCase , _lowerCAmelCase: Optional[int] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
_lowerCAmelCase , _lowerCAmelCase: str = outputs
_lowerCAmelCase: List[Any] = torch.tensor(start_logits)
_lowerCAmelCase: str = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
_lowerCAmelCase: Dict = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
_lowerCAmelCase: List[Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
_lowerCAmelCase: Optional[int] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
_lowerCAmelCase: List[Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
_lowerCAmelCase: Union[str, Any] = nested_truncate(all_preds, len(eval_dataset))
_lowerCAmelCase: Union[str, Any] = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
_lowerCAmelCase: Tuple = post_processing_function(eval_examples, eval_dataset, all_preds)
_lowerCAmelCase: Union[str, Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 20 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=30 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : str=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : List[Any]=10 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Dict=None , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = TFViTModel(config=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
UpperCAmelCase_ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowercase__ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = TFViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , tf.keras.layers.Layer ) )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowercase__ ( self : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 )
| 82 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __A ( UpperCamelCase__ ):
UpperCamelCase = """"""
UpperCamelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
UpperCamelCase = None # compression type in fsspec. ex: "gzip"
UpperCamelCase = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self :Any , __snake_case :str = "" , __snake_case :Optional[str] = None , __snake_case :Optional[dict] = None , **__snake_case :Dict ):
'''simple docstring'''
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__magic_name__ : Any =fsspec.open(
__snake_case , mode="""rb""" , protocol=__snake_case , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__magic_name__ : Union[str, Any] =os.path.basename(self.file.path.split("""::""" )[0] )
__magic_name__ : Optional[int] =(
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
__magic_name__ : Optional[Any] =None
@classmethod
def A__ ( cls :Tuple , __snake_case :str ):
'''simple docstring'''
return super()._strip_protocol(__snake_case ).lstrip("""/""" )
def A__ ( self :Any ):
'''simple docstring'''
if self.dir_cache is None:
__magic_name__ : Optional[Any] ={**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
__magic_name__ : Optional[Any] ={f["""name"""]: f}
def A__ ( self :Any , __snake_case :str ):
'''simple docstring'''
return self.file.open().read()
def A__ ( self :List[Any] , __snake_case :str , __snake_case :str = "rb" , __snake_case :str=None , __snake_case :List[Any]=True , __snake_case :Any=None , **__snake_case :Any , ):
'''simple docstring'''
__magic_name__ : Tuple =self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class __A ( UpperCamelCase__ ):
UpperCamelCase = """bz2"""
UpperCamelCase = """bz2"""
UpperCamelCase = """.bz2"""
class __A ( UpperCamelCase__ ):
UpperCamelCase = """gzip"""
UpperCamelCase = """gzip"""
UpperCamelCase = """.gz"""
class __A ( UpperCamelCase__ ):
UpperCamelCase = """lz4"""
UpperCamelCase = """lz4"""
UpperCamelCase = """.lz4"""
class __A ( UpperCamelCase__ ):
UpperCamelCase = """xz"""
UpperCamelCase = """xz"""
UpperCamelCase = """.xz"""
class __A ( UpperCamelCase__ ):
UpperCamelCase = """zstd"""
UpperCamelCase = """zstd"""
UpperCamelCase = """.zst"""
def __init__( self :Optional[Any] , __snake_case :str , __snake_case :str = "rb" , __snake_case :Optional[str] = None , __snake_case :Optional[dict] = None , __snake_case :int = DEFAULT_BLOCK_SIZE , **__snake_case :List[Any] , ):
'''simple docstring'''
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__magic_name__ : Dict =self.file.__enter__
class __A :
def __init__( self :List[Any] , __snake_case :List[Any] ):
'''simple docstring'''
__magic_name__ : int =file_
def __enter__( self :Dict ):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__( self :Dict , *__snake_case :str , **__snake_case :Any ):
'''simple docstring'''
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self :Tuple ):
'''simple docstring'''
return iter(self._file )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
return next(self._file )
def __getattr__( self :Any , __snake_case :Optional[Any] ):
'''simple docstring'''
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case :Union[str, Any] , **__snake_case :int ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
__magic_name__ : List[str] =fixed_enter
| 21 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
lowerCamelCase = {
"""facebook/bart-base""": 1_024,
"""facebook/bart-large""": 1_024,
"""facebook/bart-large-mnli""": 1_024,
"""facebook/bart-large-cnn""": 1_024,
"""facebook/bart-large-xsum""": 1_024,
"""yjernite/bart_eli5""": 1_024,
}
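# Helper below (GPT-2 style "bytes_to_unicode"): builds a reversible map from all 256 byte values to printable
# unicode characters so that byte-level BPE can operate on strings without whitespace/control bytes.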
@lru_cache()
def a__ ( ):
UpperCAmelCase_ = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
UpperCAmelCase_ = bs[:]
UpperCAmelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCAmelCase__ )
cs.append(2**8 + n )
n += 1
UpperCAmelCase_ = [chr(lowerCAmelCase__ ) for n in cs]
return dict(zip(lowerCAmelCase__ , lowerCAmelCase__ ) )
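# Helper below ("get_pairs"): return the set of adjacent symbol pairs in a word; the BPE loop uses it to pick the next merge.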
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = set()
UpperCAmelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ = char
return pairs
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]="replace" , _UpperCAmelCase : Any="<s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : Optional[int]="<unk>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : List[Any]="<mask>" , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : Dict , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ = json.load(_UpperCAmelCase )
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ = errors # how to handle errors in decoding
UpperCAmelCase_ = bytes_to_unicode()
UpperCAmelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding="utf-8" ) as merges_handle:
UpperCAmelCase_ = merges_handle.read().split("\n" )[1:-1]
UpperCAmelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
UpperCAmelCase_ = {}
UpperCAmelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
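# Greedy BPE: repeatedly merge the adjacent pair with the lowest rank in self.bpe_ranks until no rankable pair remains.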
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ = tuple(_UpperCAmelCase )
UpperCAmelCase_ = get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
UpperCAmelCase_ = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ = bigram
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
while i < len(_UpperCAmelCase ):
try:
UpperCAmelCase_ = word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ = j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ = tuple(_UpperCAmelCase )
UpperCAmelCase_ = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
UpperCAmelCase_ = get_pairs(_UpperCAmelCase )
UpperCAmelCase_ = " ".join(_UpperCAmelCase )
UpperCAmelCase_ = word
return word
def lowercase__ ( self : Dict , _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = []
for token in re.findall(self.pat , _UpperCAmelCase ):
UpperCAmelCase_ = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(" " ) )
return bpe_tokens
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "".join(_UpperCAmelCase )
UpperCAmelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + "\n" )
UpperCAmelCase_ = 0
with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(" ".join(_UpperCAmelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowercase__ ( self : str , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
UpperCAmelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def lowercase__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()):
UpperCAmelCase_ = " " + text
return (text, kwargs)
| 82 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A :
def __init__( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : Union[str, Any]=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Any=99 , lowerCAmelCase_ : Any=36 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : int=37 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Tuple=5_12 , lowerCAmelCase_ : List[Any]=16 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Optional[int]=0.0_2 , lowerCAmelCase_ : List[str]=6 , lowerCAmelCase_ : Dict=6 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Optional[int]=10_00 , ) -> List[Any]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = num_channels
_a = image_size
_a = patch_size
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = coordinate_size
_a = shape_size
_a = num_labels
_a = num_choices
_a = scope
_a = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_a = text_seq_length
_a = (image_size // patch_size) ** 2 + 1
_a = self.text_seq_length + self.image_seq_length
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
_a = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_a = bbox[i, j, 3]
_a = bbox[i, j, 1]
_a = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_a = bbox[i, j, 2]
_a = bbox[i, j, 0]
_a = tmp_coordinate
_a = tf.constant(lowerCAmelCase_ )
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.text_seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_a = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] ) -> str:
"""simple docstring"""
_a = TFLayoutLMvaModel(config=lowerCAmelCase_ )
# text + image
_a = model(lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ )
_a = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , training=lowerCAmelCase_ , )
_a = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_a = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_a = model({'''pixel_values''': pixel_values} , training=lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_a = self.num_labels
_a = TFLayoutLMvaForSequenceClassification(config=lowerCAmelCase_ )
_a = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Dict:
"""simple docstring"""
_a = self.num_labels
_a = TFLayoutLMvaForTokenClassification(config=lowerCAmelCase_ )
_a = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] ) -> int:
"""simple docstring"""
_a = 2
_a = TFLayoutLMvaForQuestionAnswering(config=lowerCAmelCase_ )
_a = model(
lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , training=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
((_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a) , (_a)) = config_and_inputs
_a = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class A ( _a ,_a ,unittest.TestCase ):
lowercase_ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase_ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
return True
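# Corresponds to the tester's _prepare_for_class override: tile the inputs for multiple-choice models and, when
# return_labels is set, attach dummy label tensors of the right shape for the classification and QA heads.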
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str=False ) -> dict:
"""simple docstring"""
_a = copy.deepcopy(lowerCAmelCase_ )
if model_class in get_values(lowerCAmelCase_ ):
_a = {
k: tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCAmelCase_ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase_ ):
_a = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCAmelCase_ ):
_a = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
_a = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCAmelCase_ ):
_a = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCAmelCase_ ):
_a = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_a = TFLayoutLMvaModelTester(self )
_a = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
if getattr(lowerCAmelCase_ , '''hf_compute_loss''' , lowerCAmelCase_ ):
# The number of elements in the loss should be the same as the number of elements in the label
_a = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_a = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCAmelCase_ )[0]
]
_a = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_a = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_a = prepared_for_class.pop('''input_ids''' )
_a = model(lowerCAmelCase_ , **lowerCAmelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
_a = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_a = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
_a = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
_a = -1_00
_a = tf.convert_to_tensor(lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , **lowerCAmelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
_a = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_a = model(lowerCAmelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
_a = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
# Get keys that were added with the _prepare_for_class function
_a = prepared_for_class.keys() - inputs_dict.keys()
_a = inspect.signature(model.call ).parameters
_a = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
_a = {0: '''input_ids'''}
for label_key in label_keys:
_a = signature_names.index(lowerCAmelCase_ )
_a = label_key
_a = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
_a = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
_a = prepared_for_class[value]
_a = tuple(lowerCAmelCase_ )
# Send to model
_a = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = TFLayoutLMvaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case_ ():
'''simple docstring'''
_a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase_ ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_a = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=lowerCAmelCase_ , return_tensors='''tf''' ).pixel_values
_a = tf.constant([[1, 2]] )
_a = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
_a = model(input_ids=lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ )
# verify the logits
_a = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase_ )
_a = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 22 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCamelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCamelCase = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTeX.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTeX.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = 0.0
for i, j in zip(_UpperCAmelCase , _UpperCAmelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(_UpperCAmelCase , _UpperCAmelCase ) else 0.0
UpperCAmelCase_ = n_correct / len(_UpperCAmelCase )
return {
"accuracy": accuracy,
}
| 82 | 0 |
def _snake_case (__lowercase = 1000000):
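# Sieve-style Euler totient: phi[i] starts at i - 1, and for every prime i (detected by phi[i] == i - 1) its
# multiples are scaled down. The sum of phi[2..limit] counts the reduced proper fractions with denominator <= limit.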
UpperCamelCase_ = [i - 1 for i in range(limit + 1)]
for i in range(2 , limit + 1):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , __lowercase):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
| 23 |
"""simple docstring"""
lowerCamelCase = """Alexander Joslin"""
import operator as op
from .stack import Stack
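# Dijkstra's two-stack algorithm for fully parenthesized infix expressions: digits go on the operand stack,
# operators on the operator stack, and each ')' pops one operator plus two operands, applies it and pushes the result.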
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
UpperCAmelCase_ = Stack()
UpperCAmelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowerCAmelCase__ ) )
elif i in operators:
# RULE 2
operator_stack.push(lowerCAmelCase__ )
elif i == ")":
# RULE 4
UpperCAmelCase_ = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operators[opr](lowerCAmelCase__ , lowerCAmelCase__ )
operand_stack.push(lowerCAmelCase__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 0 |
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int )-> int:
'''simple docstring'''
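# Bottom-up dynamic programming for the partition function: the table is filled so that memo[m][m - 1]
# equals p(m), the number of ways to write m as a sum of positive integers (order ignored).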
__snake_case = [[0 for _ in range(_lowerCamelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__snake_case = 1
for n in range(m + 1 ):
for k in range(1 , _lowerCamelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase_ : List[str] = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
UpperCAmelCase_ : Union[str, Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = int(number**0.5 )
return number == sq * sq
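# Sum the three fractions x, y and z and return the result as a numerator/denominator pair reduced by their gcd.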
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCAmelCase_ = x_den * y_den * z_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
top //= hcf
bottom //= hcf
return top, bottom
def a__ ( lowerCAmelCase__ = 35 ):
UpperCAmelCase_ = set()
UpperCAmelCase_ = 42
UpperCAmelCase_ = Fraction(0 )
UpperCAmelCase_ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCAmelCase_ = x_num * y_den + x_den * y_num
UpperCAmelCase_ = x_den * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCAmelCase_ = x_den * x_den * y_den * y_den
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=-1
UpperCAmelCase_ = x_num * y_num
UpperCAmelCase_ = x_den * y_num + x_num * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=-2
UpperCAmelCase_ = x_num * x_num * y_num * y_num
UpperCAmelCase_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
for num, den in unique_s:
total += Fraction(lowerCAmelCase__ , lowerCAmelCase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 10
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = [1, 2, 3, 4]
SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(a , self.block_size , 0 ) , a )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
SCREAMING_SNAKE_CASE : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(a , self.block_size , 0 ) , a )
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
SCREAMING_SNAKE_CASE : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(a , self.block_size , 0 ) , a )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = process_story(a )
self.assertEqual(a , [] )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = process_story(a )
self.assertEqual(a , [] )
self.assertEqual(a , [] )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = process_story(a )
SCREAMING_SNAKE_CASE : Tuple = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(a , a )
SCREAMING_SNAKE_CASE : List[Any] = ["It was the best of times."]
self.assertEqual(a , a )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = torch.tensor([1, 2, 3, 4] )
SCREAMING_SNAKE_CASE : Any = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(a , 0 ).numpy() , expected.numpy() )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(a , 23 ).numpy() , expected.numpy() )
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(a , 1 ).numpy() , expected.numpy() )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 101
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
SCREAMING_SNAKE_CASE : Tuple = compute_token_type_ids(a , a )
np.testing.assert_array_equal(a , a )
| 25 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
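# Resonant frequency of an ideal LC circuit: f = 1 / (2 * pi * sqrt(L * C)).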
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
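# Shared assertion helper: the JSON fixtures used in these tests hold 4 rows with columns col_1, col_2, col_3,
# whose dtypes must match the expected features.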
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = tmp_path / """cache"""
__snake_case : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__snake_case : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = tmp_path / """cache"""
__snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__snake_case : Tuple = features.copy() if features else default_expected_features
__snake_case : int = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case : Optional[int] = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Dict = tmp_path / """cache"""
__snake_case : Any = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
__snake_case : Dict = features.copy() if features else default_expected_features
__snake_case : Optional[int] = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case : Tuple = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
__snake_case : Tuple = features.copy()
__snake_case : List[str] = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case : int = tmp_path / """cache"""
__snake_case : List[Any] = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[Any] = tmp_path / """cache"""
__snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__snake_case : int = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = jsonl_path
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [jsonl_path]
__snake_case : Optional[int] = tmp_path / """cache"""
__snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__snake_case : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=("train",) ) -> Tuple:
"""simple docstring"""
assert isinstance(_lowerCamelCase , _lowerCamelCase )
for split in splits:
__snake_case : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = tmp_path / """cache"""
__snake_case : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__snake_case : Optional[Any] = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = tmp_path / """cache"""
__snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__snake_case : int = features.copy() if features else default_expected_features
__snake_case : Any = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case : int = JsonDatasetReader({"""train""": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
if split:
__snake_case : Union[str, Any] = {split: jsonl_path}
else:
__snake_case : Any = """train"""
__snake_case : Optional[int] = {"""train""": jsonl_path, """test""": jsonl_path}
__snake_case : List[Any] = tmp_path / """cache"""
__snake_case : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__snake_case : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
return json.load(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> Dict:
"""simple docstring"""
return [json.loads(_lowerCamelCase ) for line in buffer]
class _A :
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def lowercase__ ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__magic_name__ , __magic_name__ , lines=__magic_name__ ).write()
buffer.seek(0 )
__snake_case : Any = load_json_function(__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
assert isinstance(exported_content[0] , __magic_name__ )
assert len(__magic_name__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Tuple ) -> Tuple:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__magic_name__ , __magic_name__ , lines=__magic_name__ , orient=__magic_name__ ).write()
buffer.seek(0 )
__snake_case : Any = load_json(__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__magic_name__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__magic_name__ ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__magic_name__ , __magic_name__ , lines=__magic_name__ , num_proc=2 ).write()
buffer.seek(0 )
__snake_case : int = load_json_function(__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
assert isinstance(exported_content[0] , __magic_name__ )
assert len(__magic_name__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def lowercase__ ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__magic_name__ , __magic_name__ , lines=__magic_name__ , orient=__magic_name__ , num_proc=2 ).write()
buffer.seek(0 )
__snake_case : List[str] = load_json(__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__magic_name__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__magic_name__ ) == 10
def lowercase__ ( self : List[Any] , __magic_name__ : List[str] ) -> str:
"""simple docstring"""
with pytest.raises(__magic_name__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__magic_name__ , __magic_name__ , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / f'''test.json.{extension}'''
__snake_case : Union[str, Any] = str(shared_datadir / f'''test_file.json.{extension}''' )
JsonDatasetWriter(__magic_name__ , __magic_name__ , compression=__magic_name__ ).write()
with fsspec.open(__magic_name__ , """rb""" , compression="""infer""" ) as f:
__snake_case : List[Any] = f.read()
with fsspec.open(__magic_name__ , """rb""" , compression="""infer""" ) as f:
__snake_case : Optional[Any] = f.read()
assert exported_content == original_content
| 26 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''vit'''
def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : int=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=16 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = encoder_stride
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 1e-4
| 82 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__A : Optional[Any] = logging.getLogger(__name__)
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
_A = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
return np.sum(outputs == labels )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding='utf_8' ) as f:
_A = csv.reader(_SCREAMING_SNAKE_CASE )
_A = []
next(_SCREAMING_SNAKE_CASE ) # skip the first line
for line in tqdm(_SCREAMING_SNAKE_CASE ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_A = []
for dataset in encoded_datasets:
_A = len(_SCREAMING_SNAKE_CASE )
_A = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
_A = np.zeros((n_batch, 2) , dtype=np.intaa )
_A = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
_A = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(_SCREAMING_SNAKE_CASE ):
_A = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_A = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_A = with_conta
_A = with_conta
_A = len(_SCREAMING_SNAKE_CASE ) - 1
_A = len(_SCREAMING_SNAKE_CASE ) - 1
_A = with_conta
_A = with_conta
_A = mc_label
_A = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_SCREAMING_SNAKE_CASE ) for t in all_inputs ) )
return tensor_datasets
def __lowerCAmelCase( ) -> List[str]:
"""simple docstring"""
_A = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=_SCREAMING_SNAKE_CASE , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=_SCREAMING_SNAKE_CASE , default='' )
parser.add_argument('--eval_dataset' , type=_SCREAMING_SNAKE_CASE , default='' )
parser.add_argument('--seed' , type=_SCREAMING_SNAKE_CASE , default=42 )
parser.add_argument('--num_train_epochs' , type=_SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument('--train_batch_size' , type=_SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument('--eval_batch_size' , type=_SCREAMING_SNAKE_CASE , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=_SCREAMING_SNAKE_CASE , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=_SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=_SCREAMING_SNAKE_CASE , help=(
            'If > 0: set total number of training steps to perform. Overrides num_train_epochs.'
) , )
parser.add_argument(
        '--gradient_accumulation_steps' , type=_SCREAMING_SNAKE_CASE , default=1 , help='Number of update steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=_SCREAMING_SNAKE_CASE , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=_SCREAMING_SNAKE_CASE , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=_SCREAMING_SNAKE_CASE , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=_SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument('--lm_coef' , type=_SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument('--n_valid' , type=_SCREAMING_SNAKE_CASE , default=374 )
parser.add_argument('--server_ip' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
_A = parser.parse_args()
print(_SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_A = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
_A = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_A = ['_start_', '_delimiter_', '_classify_']
_A = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_SCREAMING_SNAKE_CASE )
_A = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
_A = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_SCREAMING_SNAKE_CASE ) )
model.to(_SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(_SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(_SCREAMING_SNAKE_CASE ) for o in obj]
logger.info('Encoding dataset...' )
_A = load_rocstories_dataset(args.train_dataset )
_A = load_rocstories_dataset(args.eval_dataset )
_A = (train_dataset, eval_dataset)
_A = tokenize_and_encode(_SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
_A = model.config.n_positions // 2 - 2
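    # Illustrative numbers (assuming the default `openai-gpt` checkpoint, n_positions = 512):
    # each story/continuation segment is capped at 512 // 2 - 2 = 254 tokens, leaving room for the
    # start, delimiter and classify special tokens added during pre-processing.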
_A = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_A = min(_SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_A = pre_process_datasets(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
_A, _A = tensor_datasets[0], tensor_datasets[1]
_A = TensorDataset(*_SCREAMING_SNAKE_CASE )
_A = RandomSampler(_SCREAMING_SNAKE_CASE )
_A = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
_A = TensorDataset(*_SCREAMING_SNAKE_CASE )
_A = SequentialSampler(_SCREAMING_SNAKE_CASE )
_A = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_A = args.max_steps
_A = args.max_steps // (len(_SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
_A = len(_SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
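            # Illustrative: 1000 training batches with gradient_accumulation_steps=4 over 3 epochs
            # yield 1000 // 4 * 3 = 750 optimizer updates.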
_A = list(model.named_parameters() )
_A = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
_A = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
_A = AdamW(_SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
_A = get_linear_schedule_with_warmup(
_SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=_SCREAMING_SNAKE_CASE )
if args.do_train:
_A, _A, _A = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
_A = 0
_A = 0
_A = tqdm(_SCREAMING_SNAKE_CASE , desc='Training' )
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
_A = tuple(t.to(_SCREAMING_SNAKE_CASE ) for t in batch )
_A, _A, _A, _A = batch
_A = model(_SCREAMING_SNAKE_CASE , mc_token_ids=_SCREAMING_SNAKE_CASE , lm_labels=_SCREAMING_SNAKE_CASE , mc_labels=_SCREAMING_SNAKE_CASE )
_A = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_A = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
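                # Exponentially smoothed loss for the progress bar: e.g. a previous average of 2.0 and a
                # current batch loss of 1.0 give 0.7 * 2.0 + 0.3 * 1.0 = 1.7 (illustrative values).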
nb_tr_steps += 1
_A = 'Training loss: {:.2e} lr: {:.2e}'.format(_SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_A = model.module if hasattr(_SCREAMING_SNAKE_CASE , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_A = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE )
_A = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , _SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(_SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_A = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_A = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
_A, _A = 0, 0
_A, _A = 0, 0
for batch in tqdm(_SCREAMING_SNAKE_CASE , desc='Evaluating' ):
_A = tuple(t.to(_SCREAMING_SNAKE_CASE ) for t in batch )
_A, _A, _A, _A = batch
with torch.no_grad():
_A, _A, _A, _A = model(
_SCREAMING_SNAKE_CASE , mc_token_ids=_SCREAMING_SNAKE_CASE , lm_labels=_SCREAMING_SNAKE_CASE , mc_labels=_SCREAMING_SNAKE_CASE )
_A = mc_logits.detach().cpu().numpy()
_A = mc_labels.to('cpu' ).numpy()
_A = accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_A = eval_loss / nb_eval_steps
_A = eval_accuracy / nb_eval_examples
_A = tr_loss / nb_tr_steps if args.do_train else None
_A = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
_A = os.path.join(args.output_dir , 'eval_results.txt' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 27 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : int=30 , _UpperCAmelCase : Tuple=400 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = size if size is not None else {"height": 20, "width": 20}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_convert_rgb
UpperCAmelCase_ = [512, 1024, 2048, 4096]
UpperCAmelCase_ = patch_size if patch_size is not None else {"height": 16, "width": 16}
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase_ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self )
@property
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ = 2048
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
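        # Illustrative: with this tester's 16x16 patches and 3 channels, 16 * 16 * 3 + 2 = 770;
        # the extra 2 values per flattened patch are the row and column indices Pix2Struct prepends.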
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase_ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCAmelCase ):
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
UpperCAmelCase_ = "Hello"
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase_ = 3
@property
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 82 | 0 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Any = '''segformer'''
def __init__( self, A=3, A=4, A=[2, 2, 2, 2], A=[8, 4, 2, 1], A=[32, 64, 160, 256], A=[7, 3, 3, 3], A=[4, 2, 2, 2], A=[1, 2, 5, 8], A=[4, 4, 4, 4], A="gelu", A=0.0, A=0.0, A=0.1, A=0.02, A=0.1, A=1E-6, A=256, A=255, **A, ):
'''simple docstring'''
super().__init__(**A )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.', A, )
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = num_encoder_blocks
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[Any] = sr_ratios
SCREAMING_SNAKE_CASE : List[Any] = hidden_sizes
SCREAMING_SNAKE_CASE : List[str] = patch_sizes
SCREAMING_SNAKE_CASE : str = strides
SCREAMING_SNAKE_CASE : List[Any] = mlp_ratios
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Any = drop_path_rate
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = decoder_hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.get('reshape_last_stage', A )
SCREAMING_SNAKE_CASE : List[str] = semantic_loss_ignore_index
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Optional[int] = version.parse('''1.11''' )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return 1E-4
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return 12
| 28 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCAmelCase_ = BitConfig(
conv_layer=lowerCAmelCase__ , num_labels=1000 , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , )
return config
def a__ ( lowerCAmelCase__ ):
if "stem.conv" in name:
UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
UpperCAmelCase_ = "bit." + name
if "bit" not in name and "classifier" not in name:
UpperCAmelCase_ = "bit.encoder." + name
return name
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ = get_config(lowerCAmelCase__ )
# load original model from timm
UpperCAmelCase_ = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
# load state_dict of original model
UpperCAmelCase_ = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val.squeeze() if "head" in key else val
# load HuggingFace model
UpperCAmelCase_ = BitForImageClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
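    # Mirror timm's preprocessing (resize -> center crop -> normalize) in the HF image processor so that
    # both pipelines produce identical pixel values, which is verified with torch.allclose below.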
UpperCAmelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(lowerCAmelCase__ ).unsqueeze(0 )
UpperCAmelCase_ = processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(lowerCAmelCase__ )
UpperCAmelCase_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCAmelCase_ = timm_model(lowerCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowerCamelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A_ = tuple[int, int]
class __lowerCamelCase :
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = vertices
lowerCamelCase_ = {
(min(UpperCAmelCase ), max(UpperCAmelCase )): weight for edge, weight in edges.items()
}
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowerCamelCase_ = weight
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = Graph({min(self.vertices )} , {} )
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
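        # Prim's algorithm: repeatedly add the minimum-weight edge with exactly one endpoint inside the
        # growing subgraph until every vertex of the original graph has been reached.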
while len(subgraph.vertices ) < len(self.vertices ):
lowerCamelCase_ = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowerCamelCase_ = edge
lowerCamelCase_ = weight
subgraph.add_edge(UpperCAmelCase , UpperCAmelCase )
return subgraph
def lowercase ( lowerCAmelCase__ = "p107_network.txt" ):
lowerCamelCase_ = os.path.abspath(os.path.dirname(lowerCAmelCase__ ) )
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = f.read().strip().split('''\n''' )
lowerCamelCase_ = [line.split(''',''' ) for line in data]
for edgea in range(1 ,len(lowerCAmelCase__ ) ):
for edgea in range(lowerCAmelCase__ ):
if adjaceny_matrix[edgea][edgea] != "-":
lowerCamelCase_ = int(adjaceny_matrix[edgea][edgea] )
lowerCamelCase_ = Graph(set(range(len(lowerCAmelCase__ ) ) ) ,lowerCAmelCase__ )
lowerCamelCase_ = graph.prims_algorithm()
lowerCamelCase_ = sum(graph.edges.values() )
lowerCamelCase_ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 29 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
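    # Fractional knapsack: sort items by value-to-weight ratio, take whole items while capacity remains,
    # then take the affordable fraction of the first item that does not fit.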
UpperCAmelCase_ = sorted(zip(lowerCAmelCase__ , lowerCAmelCase__ ) , key=lambda lowerCAmelCase__ : x[0] / x[1] , reverse=lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = [i[0] for i in r], [i[1] for i in r]
UpperCAmelCase_ = list(accumulate(lowerCAmelCase__ ) )
UpperCAmelCase_ = bisect(lowerCAmelCase__ , lowerCAmelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
__a = parser.parse_args()
__a = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 30 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCamelCase = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
lowerCamelCase = None
def a__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=lowerCAmelCase__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=lowerCAmelCase__ , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = bool(qa["answers"]["text"] )
return qid_to_has_ans
def a__ ( lowerCAmelCase__ ):
def remove_articles(lowerCAmelCase__ ):
return ARTICLES_REGEX.sub(" " , lowerCAmelCase__ )
def white_space_fix(lowerCAmelCase__ ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase__ ):
UpperCAmelCase_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase__ ) ) ) )
def a__ ( lowerCAmelCase__ ):
if not s:
return []
return normalize_answer(lowerCAmelCase__ ).split()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return int(normalize_answer(lowerCAmelCase__ ) == normalize_answer(lowerCAmelCase__ ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = collections.Counter(lowerCAmelCase__ ) & collections.Counter(lowerCAmelCase__ )
UpperCAmelCase_ = sum(common.values() )
if len(lowerCAmelCase__ ) == 0 or len(lowerCAmelCase__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = (2 * precision * recall) / (precision + recall)
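    # Worked example (illustrative): gold "the cat sat" vs. prediction "cat sat down" normalize to
    # ["cat", "sat"] and ["cat", "sat", "down"]; 2 shared tokens give precision 2/3, recall 1.0 and
    # F1 = 2 * (2/3) * 1.0 / ((2/3) + 1.0) = 0.8.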
return fa
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = qa["id"]
UpperCAmelCase_ = [t for t in qa["answers"]["text"] if normalize_answer(lowerCAmelCase__ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
UpperCAmelCase_ = [""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
UpperCAmelCase_ = preds[qid]
# Take max over all gold answers
UpperCAmelCase_ = max(compute_exact(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
UpperCAmelCase_ = max(compute_fa(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
return exact_scores, fa_scores
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = {}
for qid, s in scores.items():
UpperCAmelCase_ = na_probs[qid] > na_prob_thresh
if pred_na:
UpperCAmelCase_ = float(not qid_to_has_ans[qid] )
else:
UpperCAmelCase_ = s
return new_scores
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
if not qid_list:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for k in new_eval:
UpperCAmelCase_ = new_eval[k]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
plt.step(lowerCAmelCase__ , lowerCAmelCase__ , color="b" , alpha=0.2 , where="post" )
plt.fill_between(lowerCAmelCase__ , lowerCAmelCase__ , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowerCAmelCase__ )
plt.savefig(lowerCAmelCase__ )
plt.clf()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ):
UpperCAmelCase_ = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : na_probs[k] )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 1.0
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = [1.0]
UpperCAmelCase_ = [0.0]
UpperCAmelCase_ = 0.0
for i, qid in enumerate(lowerCAmelCase__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
UpperCAmelCase_ = true_pos / float(i + 1 )
UpperCAmelCase_ = true_pos / float(lowerCAmelCase__ )
if i == len(lowerCAmelCase__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCAmelCase__ )
recalls.append(lowerCAmelCase__ )
if out_image:
plot_pr_curve(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return {"ap": 100.0 * avg_prec}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if out_image_dir and not os.path.exists(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
UpperCAmelCase_ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
UpperCAmelCase_ = {k: float(lowerCAmelCase__ ) for k, v in qid_to_has_ans.items()}
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_exact" )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_f1" )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_oracle" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if not qid_list:
return
UpperCAmelCase_ = [na_probs[k] for k in qid_list]
UpperCAmelCase_ = np.ones_like(lowerCAmelCase__ ) / float(len(lowerCAmelCase__ ) )
plt.hist(lowerCAmelCase__ , weights=lowerCAmelCase__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(lowerCAmelCase__ , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCAmelCase_ = num_no_ans
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : na_probs[k] )
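    # Sweep candidate thresholds in order of increasing no-answer probability, tracking the best
    # cumulative score seen so far and the no-answer probability at which it was reached.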
for i, qid in enumerate(lowerCAmelCase__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCAmelCase_ = scores[qid]
else:
if preds[qid]:
UpperCAmelCase_ = -1
else:
UpperCAmelCase_ = 0
cur_score += diff
if cur_score > best_score:
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = na_probs[qid]
return 100.0 * best_score / len(lowerCAmelCase__ ), best_thresh
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = best_exact
UpperCAmelCase_ = exact_thresh
UpperCAmelCase_ = best_fa
UpperCAmelCase_ = fa_thresh
def a__ ( ):
with open(OPTS.data_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
UpperCAmelCase_ = dataset_json["data"]
with open(OPTS.pred_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
else:
UpperCAmelCase_ = {k: 0.0 for k in preds}
UpperCAmelCase_ = make_qid_to_has_ans(lowerCAmelCase__ ) # maps qid to True/False
UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if v]
UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if not v]
UpperCAmelCase_ , UpperCAmelCase_ = get_raw_scores(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
UpperCAmelCase_ = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ )
if has_ans_qids:
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "HasAns" )
if no_ans_qids:
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir )
histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
else:
print(json.dumps(lowerCAmelCase__ , indent=2 ) )
if __name__ == "__main__":
lowerCamelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 82 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ : List[str] = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ : Optional[Any] = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
lowerCamelCase__ : List[Any] = '▁'
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "token_type_ids"]
lowercase_ = FNetTokenizer
def __init__( self : List[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : Optional[Any]="[SEP]" , _lowerCAmelCase : Optional[Any]="<pad>" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , **_lowerCAmelCase : Any , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE_ = (
AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase , normalized=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else mask_token
)
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
        return (out_vocab_file,)
| 31 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float(moles / volume ) * nfactor )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
__A : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__A : Optional[str] = field(
default=A__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__A : Optional[str] = field(
default=A__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__A : Optional[str] = field(
default=A__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    __A : bool = field(default=A__ , metadata={"""help""": """Whether to freeze the encoder."""} )
__A : bool = field(default=A__ , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class __UpperCamelCase :
__A : str = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
__A : Optional[str] = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
__A : Optional[int] = field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__A : Optional[int] = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__A : Optional[int] = field(
default=1_42 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
__A : Optional[int] = field(
default=1_42 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__A : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
__A : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
__A : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
__A : Optional[str] = field(default=A__ , metadata={"""help""": """Source language id for translation."""} )
__A : Optional[str] = field(default=A__ , metadata={"""help""": """Target language id for translation."""} )
__A : Optional[int] = field(default=A__ , metadata={"""help""": """# num_beams to use for evaluation."""} )
__A : bool = field(
default=A__ , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def A__ ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict:
"""simple docstring"""
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , F'''{split}_results.json''' ) )
def A__ ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
check_output_dir(SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
_UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(SCREAMING_SNAKE_CASE_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_UpperCAmelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(SCREAMING_SNAKE_CASE_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(SCREAMING_SNAKE_CASE_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_UpperCAmelCase = SeqaSeqDataset
# Get datasets
_UpperCAmelCase = (
dataset_class(
SCREAMING_SNAKE_CASE_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
_UpperCAmelCase = (
dataset_class(
SCREAMING_SNAKE_CASE_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_UpperCAmelCase = (
dataset_class(
SCREAMING_SNAKE_CASE_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_UpperCAmelCase = (
build_compute_metrics_fn(data_args.task , SCREAMING_SNAKE_CASE_ ) if training_args.predict_with_generate else None
)
_UpperCAmelCase = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , data_args=SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , data_collator=SeqaSeqDataCollator(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , )
_UpperCAmelCase = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
_UpperCAmelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_UpperCAmelCase = train_result.metrics
_UpperCAmelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , SCREAMING_SNAKE_CASE_ , training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCAmelCase = trainer.evaluate(metric_key_prefix='''val''' )
_UpperCAmelCase = data_args.n_val
_UpperCAmelCase = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , SCREAMING_SNAKE_CASE_ , training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
_UpperCAmelCase = trainer.predict(test_dataset=SCREAMING_SNAKE_CASE_ , metric_key_prefix='''test''' )
_UpperCAmelCase = test_output.metrics
_UpperCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
_UpperCAmelCase = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , SCREAMING_SNAKE_CASE_ , training_args.output_dir )
all_metrics.update(SCREAMING_SNAKE_CASE_ )
if training_args.predict_with_generate:
_UpperCAmelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = lmap(str.strip , SCREAMING_SNAKE_CASE_ )
write_txt_file(SCREAMING_SNAKE_CASE_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(SCREAMING_SNAKE_CASE_ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main() | 32 |
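# --- Editor's sketch (not part of the row above): the HfArgumentParser flow the
# script relies on, reduced to one illustrative dataclass. The class and field
# names here are assumptions for demonstration, not the script's own arguments.
import sys
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ExampleDataArgs:
    data_dir: str = field(default="data", metadata={"help": "Path to the dataset directory."})
    max_target_length: int = field(default=142, metadata={"help": "Max target length after tokenization."})


if __name__ == "__main__":
    parser = HfArgumentParser(ExampleDataArgs)
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # A single JSON file can stand in for the full command line.
        (data_args,) = parser.parse_json_file(json_file=sys.argv[1])
    else:
        (data_args,) = parser.parse_args_into_dataclasses()
    print(data_args)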
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
lowerCamelCase = 6_378_137.0
lowerCamelCase = 6_356_752.314_245
lowerCamelCase = 6_378_137
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
UpperCAmelCase_ = haversine_distance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
UpperCAmelCase_ = (b_lata + b_lata) / 2
UpperCAmelCase_ = (b_lata - b_lata) / 2
# Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
UpperCAmelCase_ = (sin(lowerCAmelCase__ ) ** 2) * (cos(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = cos(sigma / 2 ) ** 2
    UpperCAmelCase_ = (sigma - sin(lowerCAmelCase__ )) * (x_numerator / x_denominator)
# Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
UpperCAmelCase_ = (cos(lowerCAmelCase__ ) ** 2) * (sin(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = sin(sigma / 2 ) ** 2
UpperCAmelCase_ = (sigma + sin(lowerCAmelCase__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
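# --- Editor's sketch: the same Lambert ellipsoidal-distance computation with the
# placeholder names spelled out. `haversine_fn` is passed in explicitly here (an
# assumption for self-containment) instead of the relative import used above.
from math import atan, cos, radians, sin, tan

AXIS_A = 6_378_137.0        # equatorial radius in metres
AXIS_B = 6_356_752.314_245  # polar radius in metres
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1, lon1, lat2, lon2, haversine_fn):
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # parametric (reduced) latitudes
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # central angle between the points, from the haversine distance
    sigma = haversine_fn(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    p = (b_lat1 + b_lat2) / 2
    q = (b_lat2 - b_lat1) / 2
    x = (sigma - sin(sigma)) * (sin(p) ** 2 * cos(q) ** 2) / cos(sigma / 2) ** 2
    y = (sigma + sin(sigma)) * (cos(p) ** 2 * sin(q) ** 2) / sin(sigma / 2) ** 2
    return EQUATORIAL_RADIUS * (sigma - (flattening / 2) * (x + y))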
# Imports
import numpy as np
class __magic_name__ :
'''simple docstring'''
def __init__( self:List[str] , _a:Union[str, Any]=None , _a:List[Any]=None , _a:Tuple=None , _a:Dict=None , _a:int=None ):
self.set_matricies(red=_a , green=_a , blue=_a , red_edge=_a , nir=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Dict=None , _a:Any=None , _a:Optional[int]=None , _a:str=None , _a:Optional[int]=None ):
if red is not None:
snake_case__ = red
if green is not None:
snake_case__ = green
if blue is not None:
snake_case__ = blue
if red_edge is not None:
snake_case__ = red_edge
if nir is not None:
snake_case__ = nir
return True
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any]="" , _a:List[str]=None , _a:Optional[int]=None , _a:List[Any]=None , _a:Any=None , _a:List[str]=None ):
self.set_matricies(red=_a , green=_a , blue=_a , red_edge=_a , nir=_a )
snake_case__ = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return self.nir * (self.red / (self.green**2))
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (self.nir - self.red) / (self.nir + self.red)
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return (self.nir - self.blue) / (self.nir + self.blue)
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return (self.nir - self.green) / (self.nir + self.green)
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:Optional[Any]=0.08 , _a:Dict=1.22 , _a:str=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return (self.nir / self.green) - 1
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (self.nir / self.redEdge) - 1
def SCREAMING_SNAKE_CASE__ ( self:int ):
return (self.red - self.blue) / self.red
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return self.nir - self.green
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Any=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:Any=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:List[Any]=None , _a:Optional[int]=None ):
return (self.nir - b) / (a * self.red)
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return (self.red + self.green + self.blue) / 30.5
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return self.nir / self.red
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def SCREAMING_SNAKE_CASE__ ( self:int ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return self.green / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
return self.nir / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self:str ):
return self.red / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return (self.green - self.red) / (self.green + self.red)
def SCREAMING_SNAKE_CASE__ ( self:int ):
return (self.red - self.green) / (self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
snake_case__ = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return self.nir / self.red
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (self.ndvi() + 0.5) ** (1 / 2)
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 33 |
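# --- Editor's sketch: how an index from the calculator above is evaluated on
# band data. NDVI is (NIR - RED) / (NIR + RED) applied element-wise; the
# reflectance values below are made up for illustration.
import numpy as np

nir = np.array([[0.70, 0.65], [0.80, 0.75]])
red = np.array([[0.10, 0.12], [0.09, 0.11]])
ndvi = (nir - red) / (nir + red)
print(ndvi)  # values near 1 indicate dense, healthy vegetation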
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Dict=36 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[Any]=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = 300
return config
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = self.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase__ ( self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = MraModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> int:
'''simple docstring'''
UpperCAmelCase_ = True
UpperCAmelCase_ = MraModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = MraForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = ()
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MraModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MraModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip(reason="MRA does not output attentions" )
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
UpperCAmelCase_ = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 82 | 0 |
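# --- Editor's sketch of the shape-check pattern the tester above repeats for
# every task head: tiny random ids in, assert on the output shape. `model` and
# the dimensions are assumed inputs, not tied to MRA specifically.
import torch


def check_last_hidden_state_shape(model, batch_size, seq_length, hidden_size, vocab_size=99):
    input_ids = torch.randint(0, vocab_size, (batch_size, seq_length))
    model.eval()
    with torch.no_grad():
        output = model(input_ids).last_hidden_state
    assert output.shape == (batch_size, seq_length, hidden_size)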
"""simple docstring"""
import operator
def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = operator.lt if reverse else operator.gt
UpperCamelCase = solution or []
if not arr:
return solution
UpperCamelCase = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase ,sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
UpperCamelCase = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase ,_lowercase ):
solution.insert(_lowercase ,_lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase ,_lowercase ,_lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 34 |
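# --- Editor's sketch: strand sort with ordinary names (ascending order only).
# Each pass peels off an increasing "strand" and merges it into the result.
from heapq import merge


def strand_sort_ascending(arr):
    arr = list(arr)
    solution = []
    while arr:
        strand = [arr.pop(0)]
        rest = []
        for item in arr:
            if item > strand[-1]:
                strand.append(item)
            else:
                rest.append(item)
        arr = rest
        solution = list(merge(solution, strand))  # both inputs are already sorted
    return solution


assert strand_sort_ascending([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]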
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase = 50_000
lowerCamelCase = 5_000
lowerCamelCase , lowerCamelCase = os.path.split(__file__)
lowerCamelCase = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
def a__ ( ):
UpperCAmelCase_ = {"num examples": SPEED_TEST_N_EXAMPLES}
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
UpperCAmelCase_ = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
UpperCAmelCase_ = generate_example_dataset(
os.path.join(lowerCAmelCase__ , "dataset.arrow" ) , lowerCAmelCase__ , num_examples=lowerCAmelCase__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(lowerCAmelCase__ , **lowerCAmelCase__ )
print("shuffling dataset" )
UpperCAmelCase_ = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(
lowerCAmelCase__ , **lowerCAmelCase__ )
with open(lowerCAmelCase__ , "wb" ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 82 | 0 |
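# --- Editor's sketch: the benchmark above imports `get_duration` from a local
# utils module; a plausible minimal implementation (an assumption, not the
# actual helper) simply times the wrapped call with a monotonic clock.
import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start

    return wrapper


@get_duration
def read_sequential(dataset, length):
    for i in range(length):
        _ = dataset[i]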
from timeit import timeit
def a ( A__ ) -> int:
'''simple docstring'''
if number < 0:
raise ValueError('''the value of input must not be negative''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
while number:
number &= number - 1
result += 1
return result
def a ( A__ ) -> int:
'''simple docstring'''
if number < 0:
raise ValueError('''the value of input must not be negative''' )
SCREAMING_SNAKE_CASE__ : List[str] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a ( ) -> None:
'''simple docstring'''
def do_benchmark(A__ ) -> None:
SCREAMING_SNAKE_CASE__ : List[Any] = '''import __main__ as z'''
print(f"""Benchmark when {number = }:""" )
print(f"""{get_set_bits_count_using_modulo_operator(A__ ) = }""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=A__ )
print(f"""timeit() runs in {timing} seconds""" )
print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(A__ ) = }""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = timeit(
'''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=A__ , )
print(f"""timeit() runs in {timing} seconds""" )
for number in (2_5, 3_7, 5_8, 0):
do_benchmark(A__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 35 |
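# --- Editor's worked example of Brian Kernighan's trick used above: each
# `n &= n - 1` clears the lowest set bit, so the loop runs once per set bit.
n = 0b101100  # 44, which has three set bits
count = 0
while n:
    n &= n - 1  # 101100 -> 101000 -> 100000 -> 0
    count += 1
assert count == 3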
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''image''': Image()} )
UpperCamelCase = Features({'''labels''': ClassLabel} )
UpperCamelCase = "image"
UpperCamelCase = "labels"
def lowercase__ ( self : str , _UpperCAmelCase : str ) -> Dict:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , _UpperCAmelCase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
UpperCAmelCase_ = copy.deepcopy(self )
UpperCAmelCase_ = self.label_schema.copy()
UpperCAmelCase_ = features[self.label_column]
UpperCAmelCase_ = label_schema
return task_template
@property
def lowercase__ ( self : List[str] ) -> Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 82 | 0 |
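# --- Editor's sketch of the alignment step the task template performs: its
# generic ClassLabel placeholder is replaced by the dataset's concrete feature,
# so the label names travel with the schema. The label names are made up.
from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
label_schema = {"labels": features["labels"]}
print(label_schema["labels"].names)  # ['cat', 'dog']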
def lowercase ( __A : int = 200_0000 ) -> int:
'''simple docstring'''
snake_case : List[str] = [0 for i in range(n + 1 )]
snake_case : Optional[Any] = 1
snake_case : Tuple = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , __A ):
snake_case : Optional[int] = 1
snake_case : List[str] = 0
for i in range(__A ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
| 36 |
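# --- Editor's sketch: the same prime-summing sieve with descriptive names
# (sum of all primes below n, Project Euler 10 style).
def sum_of_primes_below(n: int = 2_000_000) -> int:
    is_composite = [False] * (n + 1)
    total = 0
    for i in range(2, n):
        if not is_composite[i]:
            total += i
            for j in range(i * i, n + 1, i):
                is_composite[j] = True
    return total


assert sum_of_primes_below(10) == 17  # 2 + 3 + 5 + 7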
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = generator.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 82 | 0 |
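# --- Editor's sketch of the save/reload round-trip the test above exercises:
# serialize the pipeline to a temporary directory and load it back, then compare
# outputs generated from the same seed. `pipe` is an assumed, already-loaded
# pipeline object.
import tempfile

from diffusers import DiffusionPipeline


def roundtrip(pipe):
    with tempfile.TemporaryDirectory() as tmpdir:
        pipe.save_pretrained(tmpdir)
        return DiffusionPipeline.from_pretrained(tmpdir)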
from __future__ import annotations
from typing import Generic, TypeVar
UpperCamelCase : Optional[Any] = TypeVar("""T""")
class A__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase__ : T ):
a__ : int = data
a__ : List[Any] = self
a__ : Optional[Any] = 0
class A__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] ):
# map from node name to the node object
a__ : dict[T, DisjointSetTreeNode[T]] = {}
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : T ):
# create a new set with x as its member
a__ : str = DisjointSetTreeNode(lowerCamelCase__ )
def _UpperCamelCase( self : int , lowerCamelCase__ : T ):
# find the set x belongs to (with path-compression)
a__ : Optional[Any] = self.map[data]
if elem_ref != elem_ref.parent:
a__ : Optional[int] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : DisjointSetTreeNode[T] , lowerCamelCase__ : DisjointSetTreeNode[T] ):
# helper function for union operation
if nodea.rank > nodea.rank:
a__ : Tuple = nodea
else:
a__ : Any = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : T , lowerCamelCase__ : T ):
# merge 2 disjoint sets
self.link(self.find_set(lowerCamelCase__ ) , self.find_set(lowerCamelCase__ ) )
class A__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[Any] ):
# connections: map from the node to the neighbouring nodes (with weights)
a__ : dict[T, dict[T, int]] = {}
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : T ):
# add a node ONLY if its not present in the graph
if node not in self.connections:
a__ : List[str] = {}
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : T , lowerCamelCase__ : T , lowerCamelCase__ : int ):
# add an edge with the given weight
self.add_node(lowerCamelCase__ )
self.add_node(lowerCamelCase__ )
a__ : Tuple = weight
a__ : Union[str, Any] = weight
def _UpperCamelCase( self : Union[str, Any] ):
a__ : List[Any] = []
a__ : Dict = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda lowerCamelCase__ : lowerCamelCase__[2] )
# creating the disjoint set
a__ : Optional[Any] = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(lowerCamelCase__ )
# MST generation
a__ : List[Any] = 0
a__ : Union[str, Any] = 0
a__ : Dict = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
a__, a__, a__ : str = edges[index]
index += 1
a__ : str = disjoint_set.find_set(lowerCamelCase__ )
a__ : List[str] = disjoint_set.find_set(lowerCamelCase__ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
disjoint_set.union(lowerCamelCase__ , lowerCamelCase__ )
return graph
| 37 |
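# --- Editor's sketch: a compact union-find (path compression + union by rank),
# the core structure behind the Kruskal implementation above.
class DisjointSet:
    def __init__(self, items):
        self.parent = {x: x for x in items}
        self.rank = {x: 0 for x in items}

    def find(self, x):
        if self.parent[x] != x:
            self.parent[x] = self.find(self.parent[x])  # path compression
        return self.parent[x]

    def union(self, a, b):
        root_a, root_b = self.find(a), self.find(b)
        if root_a == root_b:
            return False  # same set already; adding this edge would form a cycle
        if self.rank[root_a] < self.rank[root_b]:
            root_a, root_b = root_b, root_a
        self.parent[root_b] = root_a
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1
        return True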
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return x if y == 0 else greatest_common_divisor(lowerCAmelCase__ , x % y )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return (x * y) // greatest_common_divisor(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ = 20 ):
UpperCAmelCase_ = 1
for i in range(1 , n + 1 ):
UpperCAmelCase_ = lcm(lowerCAmelCase__ , lowerCAmelCase__ )
return g
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 0 |
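# --- Editor's note: the same Project Euler 5 result can be cross-checked with
# the standard library on Python 3.9+ by folding math.lcm over 1..20.
import math
from functools import reduce

assert reduce(math.lcm, range(1, 21)) == 232792560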
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Optional[Any] = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
A_ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38 |
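# --- Editor's sketch of the lazy-module pattern used above: attribute access on
# the package triggers the real submodule import. This is a simplified stand-in,
# not the actual transformers._LazyModule implementation.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported object name to the submodule that defines it
        self._object_to_module = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        if attr not in self._object_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._object_to_module[attr], self.__name__)
        return getattr(module, attr)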
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCAmelCase_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
else:
UpperCAmelCase_ = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = ProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
UpperCAmelCase_ = ["key_proj", "value_proj", "query_proj"]
UpperCAmelCase_ = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
UpperCAmelCase_ = key.split("." )
if attributes[0] == "lm_head":
UpperCAmelCase_ = prophet
UpperCAmelCase_ = prophet_old
else:
UpperCAmelCase_ = prophet.prophetnet
UpperCAmelCase_ = prophet_old.model
UpperCAmelCase_ = False
for attribute in attributes:
if attribute in mapping:
UpperCAmelCase_ = mapping[attribute]
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = attribute
elif hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
UpperCAmelCase_ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.bias
logger.info(f"""{attribute} is initialized""" )
UpperCAmelCase_ = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase__ , "in_proj_weight" ):
UpperCAmelCase_ = old_model.in_proj_weight.shape[0] // 3
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
            assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
            assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
UpperCAmelCase_ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
UpperCAmelCase_ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
UpperCAmelCase_ = True
break
if attribute.isdigit():
UpperCAmelCase_ = model[int(lowerCAmelCase__ )]
UpperCAmelCase_ = old_model[int(lowerCAmelCase__ )]
else:
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if old_attribute == "":
UpperCAmelCase_ = old_model
else:
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 82 | 0 |
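# --- Editor's sketch of the fused-projection split performed above: an old
# checkpoint stores query/key/value as one `in_proj_weight`, which the new model
# keeps as three separate projections of `embed_dim` rows each.
import torch


def split_in_proj(in_proj_weight, in_proj_bias):
    embed_dim = in_proj_weight.shape[0] // 3
    q_w, k_w, v_w = in_proj_weight.split(embed_dim, dim=0)
    q_b, k_b, v_b = in_proj_bias.split(embed_dim, dim=0)
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)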
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowerCAmelCase_ = sys.version_info >= (3, 10)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ):
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : float
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : bool
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 42
SCREAMING_SNAKE_CASE : str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : Optional[bool] = None
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = "titi"
SCREAMING_SNAKE_CASE : Any = "toto"
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = "titi"
SCREAMING_SNAKE_CASE : Optional[Any] = "toto"
SCREAMING_SNAKE_CASE : Any = 42
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : BasicEnum = "toto"
def snake_case__( self : Tuple ) ->List[str]:
snake_case_ = BasicEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : MixedTypeEnum = "toto"
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = MixedTypeEnum(self.foo )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[float] = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : Optional[str] = None
SCREAMING_SNAKE_CASE : Optional[List[str]] = list_field(default=[] )
SCREAMING_SNAKE_CASE : Optional[List[int]] = list_field(default=[] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[] )
SCREAMING_SNAKE_CASE : List[int] = list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
SCREAMING_SNAKE_CASE : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[int] = field()
SCREAMING_SNAKE_CASE : str = field()
SCREAMING_SNAKE_CASE : BasicEnum = field()
def snake_case__( self : Optional[Any] ) ->Tuple:
snake_case_ = BasicEnum(self.required_enum )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : "BasicEnum" = field()
SCREAMING_SNAKE_CASE : "Optional[bool]" = None
SCREAMING_SNAKE_CASE : "str" = field(default="toto" , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : bool | None = None
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int | None = None
SCREAMING_SNAKE_CASE : float | None = field(default=__A , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE : str | None = None
SCREAMING_SNAKE_CASE : list[str] | None = list_field(default=[] )
SCREAMING_SNAKE_CASE : list[int] | None = list_field(default=[] )
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Dict , _UpperCamelCase : argparse.ArgumentParser , _UpperCamelCase : argparse.ArgumentParser ) ->str:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , _UpperCamelCase ) and yy.get('''choices''' , _UpperCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](_UpperCamelCase ) , yy['''type'''](_UpperCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--bar''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--baz''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--flag''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((snake_case_), ) = parser.parse_args_into_dataclasses(_UpperCamelCase , look_for_args_file=_UpperCamelCase )
self.assertFalse(example.flag )
def snake_case__( self : Tuple ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=4_2 , type=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=_UpperCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
snake_case_ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) )
def snake_case__( self : Tuple ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__( self : Tuple ) ->Union[str, Any]:
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
snake_case_ = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
snake_case_ = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_UpperCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(
_UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def snake_case__( self : Optional[Any] ) ->List[Any]:
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--bar''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=_UpperCamelCase , type=_UpperCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_UpperCamelCase )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_UpperCamelCase )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_args([] )
self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[] ) )
snake_case_ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(_UpperCamelCase , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument('''--required_str''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[str] ) ->int:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , )
expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase )
self.argparsersEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Dict ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
snake_case_ = parser.parse_dict(_UpperCamelCase )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : int ) ->Dict:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase )
def snake_case__( self : str ) ->Tuple:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_json''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_ = HfArgumentParser(_UpperCamelCase )
snake_case_ = {
'''foo''': 1_2,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(_UpperCamelCase , '''temp_yaml''' )
os.mkdir(_UpperCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(_UpperCamelCase , _UpperCamelCase )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
snake_case_ = BasicExample(**_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : Any ) ->Any:
snake_case_ = HfArgumentParser(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase ) | 39 |
"""simple docstring"""
def count_inversions_bf(arr):
    """Count inversions by brute force: O(n^2) pairwise comparison."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Count inversions with a merge-sort-style divide and conquer: O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """Merge two sorted lists and count the inversions that cross between them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 82 | 0 |
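A small worked check of the merge step above, relying on the function names recovered from this snippet's call sites: when an element of Q is emitted before the rest of the already-sorted P, every remaining element of P forms one cross inversion, which is exactly the `len(p) - i` increment.
# Worked example for _count_cross_inversions: P = [2, 5], Q = [1, 3].
# The cross inversions are (2, 1), (5, 1) and (5, 3), and the merge result is sorted.
merged, crossings = _count_cross_inversions([2, 5], [1, 3])
assert merged == [1, 2, 3, 5]
assert crossings == 3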
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__UpperCAmelCase = get_logger(__name__)
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_ = None ) -> Dict:
UpperCamelCase : List[str] = (
os.path.join(SCREAMING_SNAKE_CASE_, config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
UpperCamelCase : Any = Extractor
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
UpperCamelCase : int = os.path.abspath(SCREAMING_SNAKE_CASE_ )
return os.path.join(self.extract_dir, hash_url_to_filename(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> bool:
return force_extract or (
not os.path.isfile(SCREAMING_SNAKE_CASE_ ) and not (os.path.isdir(SCREAMING_SNAKE_CASE_ ) and os.listdir(SCREAMING_SNAKE_CASE_ ))
)
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = False ) -> str:
UpperCamelCase : Tuple = self.extractor.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if not extractor_format:
return input_path
UpperCamelCase : List[Any] = self._get_output_path(SCREAMING_SNAKE_CASE_ )
if self._do_extract(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.extractor.extract(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return output_path
class lowerCAmelCase_ ( a__ ):
@classmethod
@abstractmethod
def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> bool:
...
@staticmethod
@abstractmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
...
class lowerCAmelCase_ ( a__ , a__ ):
UpperCAmelCase__ : List[bytes] = []
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
with open(SCREAMING_SNAKE_CASE_, 'rb' ) as f:
return f.read(SCREAMING_SNAKE_CASE_ )
@classmethod
def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = b"" ) -> bool:
if not magic_number:
UpperCamelCase : str = max(len(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
try:
UpperCamelCase : List[str] = cls.read_magic_number(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
except OSError:
return False
return any(magic_number.startswith(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
class lowerCAmelCase_ ( a__ ):
@classmethod
def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> bool:
return tarfile.is_tarfile(SCREAMING_SNAKE_CASE_ )
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
def resolved(SCREAMING_SNAKE_CASE_ ) -> str:
return os.path.realpath(os.path.abspath(SCREAMING_SNAKE_CASE_ ) )
def badpath(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) ).startswith(SCREAMING_SNAKE_CASE_ )
def badlink(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> bool:
# Links are interpreted relative to the directory containing the link
UpperCamelCase : Any = resolved(os.path.join(SCREAMING_SNAKE_CASE_, os.path.dirname(info.name ) ) )
return badpath(info.linkname, base=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = resolved(SCREAMING_SNAKE_CASE_ )
for finfo in members:
if badpath(finfo.name, SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = tarfile.open(SCREAMING_SNAKE_CASE_ )
tar_file.extractall(SCREAMING_SNAKE_CASE_, members=TarExtractor.safemembers(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
tar_file.close()
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Dict = [b"\x1F\x8B"]
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
with gzip.open(SCREAMING_SNAKE_CASE_, 'rb' ) as gzip_file:
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Optional[Any] = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = b"" ) -> bool:
if super().is_extractable(SCREAMING_SNAKE_CASE_, magic_number=SCREAMING_SNAKE_CASE_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(SCREAMING_SNAKE_CASE_, 'rb' ) as fp:
UpperCamelCase : Dict = _EndRecData(SCREAMING_SNAKE_CASE_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
UpperCamelCase : str = fp.read(SCREAMING_SNAKE_CASE_ ) # CD is where we expect it to be
if len(SCREAMING_SNAKE_CASE_ ) == sizeCentralDir:
UpperCamelCase : int = struct.unpack(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=SCREAMING_SNAKE_CASE_ )
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_, 'r' ) as zip_file:
zip_file.extractall(SCREAMING_SNAKE_CASE_ )
zip_file.close()
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : List[str] = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
with lzma.open(SCREAMING_SNAKE_CASE_ ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Optional[Any] = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = rarfile.RarFile(SCREAMING_SNAKE_CASE_ )
rf.extractall(SCREAMING_SNAKE_CASE_ )
rf.close()
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Any = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
UpperCamelCase : Any = zstd.ZstdDecompressor()
with open(SCREAMING_SNAKE_CASE_, 'rb' ) as ifh, open(SCREAMING_SNAKE_CASE_, 'wb' ) as ofh:
dctx.copy_stream(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = [b"\x42\x5A\x68"]
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
with bz2.open(SCREAMING_SNAKE_CASE_, 'rb' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : List[str] = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
import py7zr
os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=SCREAMING_SNAKE_CASE_ )
with py7zr.SevenZipFile(SCREAMING_SNAKE_CASE_, 'r' ) as archive:
archive.extractall(SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Optional[Any] = [b"\x04\x22\x4D\x18"]
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
import lz4.frame
with lz4.frame.open(SCREAMING_SNAKE_CASE_, 'rb' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
UpperCAmelCase__ : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def snake_case_ ( cls ) -> Dict:
return max(
len(SCREAMING_SNAKE_CASE_ )
for extractor in cls.extractors.values()
if issubclass(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Tuple:
try:
return MagicNumberBaseExtractor.read_magic_number(SCREAMING_SNAKE_CASE_, magic_number_length=SCREAMING_SNAKE_CASE_ )
except OSError:
return b""
@classmethod
def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = False ) -> bool:
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.', category=SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[Any] = cls.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def snake_case_ ( cls, SCREAMING_SNAKE_CASE_ ) -> str: # <Added version="2.4.0"/>
UpperCamelCase : Union[str, Any] = cls._get_magic_number_max_length()
UpperCamelCase : List[Any] = cls._read_magic_number(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_, magic_number=SCREAMING_SNAKE_CASE_ ):
return extractor_format
@classmethod
def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = "deprecated", ) -> None:
os.makedirs(os.path.dirname(SCREAMING_SNAKE_CASE_ ), exist_ok=SCREAMING_SNAKE_CASE_ )
# Prevent parallel extractions
UpperCamelCase : List[Any] = str(Path(SCREAMING_SNAKE_CASE_ ).with_suffix('.lock' ) )
with FileLock(SCREAMING_SNAKE_CASE_ ):
shutil.rmtree(SCREAMING_SNAKE_CASE_, ignore_errors=SCREAMING_SNAKE_CASE_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.', category=SCREAMING_SNAKE_CASE_, )
UpperCamelCase : Union[str, Any] = extractor if extractor != 'deprecated' else extractor_format
else:
UpperCamelCase : List[Any] = cls.extractors[extractor_format]
return extractor.extract(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.', category=SCREAMING_SNAKE_CASE_, )
for extractor in cls.extractors.values():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_ ):
return extractor.extract(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 40 |
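The extractor classes above choose a format by comparing a file's leading bytes against known magic numbers. A minimal standalone sketch of that dispatch (illustration only, not the datasets API; `MAGIC_OPENERS` and `sniff_opener` are made-up names):
import bz2
import gzip
import lzma

# Leading byte signatures -> decompression opener (a simplified version of the
# magic-number dispatch performed by the extractor classes above).
MAGIC_OPENERS = {
    b"\x1f\x8b": gzip.open,                  # gzip
    b"\x42\x5a\x68": bz2.open,               # bzip2 ("BZh")
    b"\xfd\x37\x7a\x58\x5a\x00": lzma.open,  # xz
}

def sniff_opener(path):
    """Return the opener whose magic number matches the start of the file, or None."""
    with open(path, "rb") as f:
        header = f.read(max(len(magic) for magic in MAGIC_OPENERS))
    for magic, opener in MAGIC_OPENERS.items():
        if header.startswith(magic):
            return opener
    return None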
"""simple docstring"""
def a__ ( number ):
    """Return the two's complement of a negative integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
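A few worked values for the two's-complement converter above (the function keeps its placeholder name `a__` in this dump, and the values assume the corrected body): the result always uses one bit more than the magnitude needs.
assert a__(-1) == "0b11"    # 1 magnitude bit -> 2-bit two's complement
assert a__(-5) == "0b1011"  # 101 -> 4-bit two's complement
assert a__(0) == "0b0"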
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_mgp_str'''] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
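The import file above defers heavy submodule imports through `_LazyModule`. The same effect can be sketched with a PEP 562 module-level `__getattr__` (illustration only; the attribute table below is invented and unrelated to MGP-STR):
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dataclass": "dataclasses"}  # attribute name -> providing module

def __getattr__(name):
    # Import the providing module only when the attribute is first accessed.
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")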
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] , **_UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , "vision" )
self.check_model_type(_UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , _UpperCAmelCase : Union[str, List[str]] = None , **_UpperCAmelCase : Optional[int] , ) -> List[Any]:
'''simple docstring'''
if "text_queries" in kwargs:
UpperCAmelCase_ = kwargs.pop("text_queries" )
if isinstance(_UpperCAmelCase , (str, Image.Image) ):
UpperCAmelCase_ = {"image": image, "candidate_labels": candidate_labels}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
return results
def lowercase__ ( self : str , **_UpperCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {}
if "threshold" in kwargs:
UpperCAmelCase_ = kwargs["threshold"]
if "top_k" in kwargs:
UpperCAmelCase_ = kwargs["top_k"]
return {}, {}, postprocess_params
def lowercase__ ( self : int , _UpperCAmelCase : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = load_image(inputs["image"] )
UpperCAmelCase_ = inputs["candidate_labels"]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase_ = candidate_labels.split("," )
UpperCAmelCase_ = torch.tensor([[image.height, image.width]] , dtype=torch.int64 )
for i, candidate_label in enumerate(_UpperCAmelCase ):
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.image_processor(_UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(_UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowercase__ ( self : int , _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = model_inputs.pop("target_size" )
UpperCAmelCase_ = model_inputs.pop("candidate_label" )
UpperCAmelCase_ = model_inputs.pop("is_last" )
UpperCAmelCase_ = self.model(**_UpperCAmelCase )
UpperCAmelCase_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def lowercase__ ( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : List[str]=None ) -> int:
'''simple docstring'''
UpperCAmelCase_ = []
for model_output in model_outputs:
UpperCAmelCase_ = model_output["candidate_label"]
UpperCAmelCase_ = BaseModelOutput(_UpperCAmelCase )
UpperCAmelCase_ = self.image_processor.post_process_object_detection(
outputs=_UpperCAmelCase , threshold=_UpperCAmelCase , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
UpperCAmelCase_ = outputs["scores"][index].item()
UpperCAmelCase_ = self._get_bounding_box(outputs["boxes"][index][0] )
UpperCAmelCase_ = {"score": score, "label": label, "box": box}
results.append(_UpperCAmelCase )
UpperCAmelCase_ = sorted(_UpperCAmelCase , key=lambda x : x["score"] , reverse=_UpperCAmelCase )
if top_k:
UpperCAmelCase_ = results[:top_k]
return results
def lowercase__ ( self : str , _UpperCAmelCase : "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = box.int().tolist()
UpperCAmelCase_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 82 | 0 |
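A typical way to drive the zero-shot object detection pipeline defined above (a sketch; the OWL-ViT checkpoint name and the image URL are illustrative examples, not taken from this file):
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
# Each prediction carries a score, the matched label and an xmin/ymin/xmax/ymax box,
# mirroring the dict assembled in _get_bounding_box above.
print(predictions[0])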
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _UpperCamelCase ( __UpperCamelCase ) -> List[Tuple[int, ...]]:
lowerCamelCase_ = []
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(__UpperCamelCase ) )
elif isinstance(__UpperCamelCase ,(list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(__UpperCamelCase ) )
elif isinstance(__UpperCamelCase ,torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Tuple[int, ...]:
lowerCamelCase_ = []
for d in reversed(__UpperCamelCase ):
idx.append(flat_idx % d )
lowerCamelCase_ = flat_idx // d
return tuple(reversed(__UpperCamelCase ) )
@torch.jit.ignore
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
if start_edges is None:
lowerCamelCase_ = [s == 0 for s in start]
reduce_edge_list(__UpperCamelCase )
if end_edges is None:
lowerCamelCase_ = [e == (d - 1) for e, d in zip(__UpperCamelCase ,__UpperCamelCase )]
reduce_edge_list(__UpperCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(__UpperCamelCase ) == 0:
return [()]
elif len(__UpperCamelCase ) == 1:
return [(slice(start[0] ,end[0] + 1 ),)]
lowerCamelCase_ = []
lowerCamelCase_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(__UpperCamelCase ,__UpperCamelCase ):
if s == e:
path_list.append(slice(__UpperCamelCase ,s + 1 ) )
else:
break
lowerCamelCase_ = tuple(__UpperCamelCase )
lowerCamelCase_ = len(__UpperCamelCase )
# start == end, and we're done
if divergence_idx == len(__UpperCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCamelCase_ = start[divergence_idx]
return tuple(
path + (slice(__UpperCamelCase ,sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] ,[d - 1 for d in dims[divergence_idx + 1 :]] ,dims[divergence_idx + 1 :] ,start_edges=start_edges[divergence_idx + 1 :] ,end_edges=[True for _ in end_edges[divergence_idx + 1 :]] ,) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCamelCase_ = end[divergence_idx]
return tuple(
path + (slice(__UpperCamelCase ,edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] ,end[divergence_idx + 1 :] ,dims[divergence_idx + 1 :] ,start_edges=[True for _ in start_edges[divergence_idx + 1 :]] ,end_edges=end_edges[divergence_idx + 1 :] ,) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
lowerCamelCase_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> torch.Tensor:
lowerCamelCase_ = t.shape[:no_batch_dims]
lowerCamelCase_ = list(_flat_idx_to_idx(__UpperCamelCase ,__UpperCamelCase ) )
# _get_minimal_slice_set is inclusive
lowerCamelCase_ = list(_flat_idx_to_idx(flat_end - 1 ,__UpperCamelCase ) )
# Get an ordered list of slices to perform
lowerCamelCase_ = _get_minimal_slice_set(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,)
lowerCamelCase_ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = False ,) -> Any:
if not (len(__UpperCamelCase ) > 0):
raise ValueError('Must provide at least one input' )
lowerCamelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(__UpperCamelCase )]
lowerCamelCase_ = tuple([max(s) for s in zip(*__UpperCamelCase )] )
def _prep_inputs(__UpperCamelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
lowerCamelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
lowerCamelCase_ = t.reshape(-1 ,*t.shape[no_batch_dims:] )
else:
lowerCamelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
lowerCamelCase_ = tensor_tree_map(_prep_inputs ,__UpperCamelCase )
lowerCamelCase_ = None
if _out is not None:
lowerCamelCase_ = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) ,_out )
lowerCamelCase_ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
lowerCamelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(__UpperCamelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
lowerCamelCase_ = 0
lowerCamelCase_ = prepped_outputs
for _ in range(__UpperCamelCase ):
# Chunk the input
if not low_mem:
lowerCamelCase_ = _select_chunk
else:
lowerCamelCase_ = partial(
_chunk_slice ,flat_start=__UpperCamelCase ,flat_end=min(__UpperCamelCase ,i + chunk_size ) ,no_batch_dims=len(__UpperCamelCase ) ,)
lowerCamelCase_ = tensor_tree_map(__UpperCamelCase ,__UpperCamelCase )
# Run the layer on the chunk
lowerCamelCase_ = layer(**__UpperCamelCase )
# Allocate space for the output
if out is None:
lowerCamelCase_ = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) ,__UpperCamelCase )
# Put the chunk in its pre-allocated space
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
def assign(__UpperCamelCase ,__UpperCamelCase ) -> None:
for k, v in da.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
assign(__UpperCamelCase ,da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
lowerCamelCase_ = da[k]
assign(__UpperCamelCase ,__UpperCamelCase )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
for xa, xa in zip(__UpperCamelCase ,__UpperCamelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
lowerCamelCase_ = xa
elif isinstance(__UpperCamelCase ,torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
lowerCamelCase_ = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
lowerCamelCase_ = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) ,__UpperCamelCase )
return out
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ = 512 , ) -> int:
'''simple docstring'''
lowerCamelCase_ = max_chunk_size
lowerCamelCase_ = None
lowerCamelCase_ = None
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(SCREAMING_SNAKE_CASE_ ) -> bool:
try:
with torch.no_grad():
fn(*SCREAMING_SNAKE_CASE_ , chunk_size=SCREAMING_SNAKE_CASE_ )
return True
except RuntimeError:
return False
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
return candidates[min_viable_chunk_size_index]
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool:
'''simple docstring'''
lowerCamelCase_ = True
for aa, aa in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert type(SCREAMING_SNAKE_CASE_ ) == type(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
consistent &= self._compare_arg_caches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
lowerCamelCase_ = [v for _, v in sorted(aa.items() , key=lambda x : x[0] )]
consistent &= self._compare_arg_caches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
consistent &= aa == aa
return consistent
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> int:
'''simple docstring'''
lowerCamelCase_ = True
lowerCamelCase_ = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self._compare_arg_caches(self.cached_arg_data , SCREAMING_SNAKE_CASE_ )
else:
# Otherwise, we can reuse the precomputed value
lowerCamelCase_ = False
if not consistent:
lowerCamelCase_ = self._determine_favorable_chunk_size(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 42 |
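The chunking machinery above trades peak memory for repeated smaller forward passes over a flattened batch. A toy version of the core idea (illustration only, not the OpenFold API; `chunked_apply` is an invented helper):
import torch

def chunked_apply(fn, x, chunk_size):
    """Apply fn to x in slices along dim 0 and concatenate, bounding peak activation memory."""
    pieces = [fn(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)]
    return torch.cat(pieces, dim=0)

x = torch.randn(10, 4)
# Same result as calling fn on the full tensor, but only chunk_size rows are in flight at once.
assert torch.allclose(chunked_apply(torch.relu, x, 3), torch.relu(x))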
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=30 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : str=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : List[Any]=10 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Dict=None , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = TFViTModel(config=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
UpperCAmelCase_ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowercase__ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = TFViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , tf.keras.layers.Layer ) )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowercase__ ( self : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 )
| 82 | 0 |
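The sequence length asserted in the ViT tests above follows the patching rule `seq_length = (image_size // patch_size) ** 2 + 1`, the extra token being [CLS]. A quick check with the tester defaults used above:
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 1
assert (num_patches, seq_length) == (225, 226)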
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace only the last `occurrence` matches of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
"""simple docstring"""
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict):
    """Rename dall_e checkpoint keys to the layout expected by FlavaImageCodebook."""
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
from dall_e import Encoder
lowercase__ = Encoder()
if os.path.exists(SCREAMING_SNAKE_CASE ):
lowercase__ = torch.load(SCREAMING_SNAKE_CASE )
else:
lowercase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ = ckpt.state_dict()
encoder.load_state_dict(SCREAMING_SNAKE_CASE )
if config_path is not None:
lowercase__ = FlavaImageCodebookConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
lowercase__ = FlavaImageCodebookConfig()
lowercase__ = FlavaImageCodebook(SCREAMING_SNAKE_CASE ).eval()
lowercase__ = encoder.state_dict()
lowercase__ = upgrade_state_dict(SCREAMING_SNAKE_CASE )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE )
lowercase__ = hf_model.state_dict()
lowercase__ = count_parameters(SCREAMING_SNAKE_CASE )
lowercase__ = count_parameters(SCREAMING_SNAKE_CASE )
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
else:
return hf_state_dict
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowerCAmelCase = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 43 |
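`rreplace` above rewrites only the last occurrence of a suffix, which is what lets `.w` and `.b` become `.weight` and `.bias` without touching earlier dots in a key. Two worked values, assuming the reconstructed signature above:
assert rreplace("blocks.0.attn.w", ".w", ".weight", 1) == "blocks.0.attn.weight"
assert rreplace("path.b.res_path.b", ".b", ".bias", 1) == "path.b.res_path.bias"  # only the last ".b" changes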
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
lowerCamelCase = {
"""facebook/bart-base""": 1_024,
"""facebook/bart-large""": 1_024,
"""facebook/bart-large-mnli""": 1_024,
"""facebook/bart-large-cnn""": 1_024,
"""facebook/bart-large-xsum""": 1_024,
"""yjernite/bart_eli5""": 1_024,
}
@lru_cache()
def bytes_to_unicode():
    """Map every byte value to a printable unicode character for byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
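# Illustration: get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}; the BPE loop in the tokenizer
# below repeatedly merges the highest-ranked of these adjacent pairs until no
# ranked pair remains.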
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]="replace" , _UpperCAmelCase : Any="<s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : Optional[int]="<unk>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : List[Any]="<mask>" , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : Dict , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ = json.load(_UpperCAmelCase )
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ = errors # how to handle errors in decoding
UpperCAmelCase_ = bytes_to_unicode()
UpperCAmelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding="utf-8" ) as merges_handle:
UpperCAmelCase_ = merges_handle.read().split("\n" )[1:-1]
UpperCAmelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
UpperCAmelCase_ = {}
UpperCAmelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ = tuple(_UpperCAmelCase )
UpperCAmelCase_ = get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
UpperCAmelCase_ = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ = bigram
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
while i < len(_UpperCAmelCase ):
try:
UpperCAmelCase_ = word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ = j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ = tuple(_UpperCAmelCase )
UpperCAmelCase_ = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
UpperCAmelCase_ = get_pairs(_UpperCAmelCase )
UpperCAmelCase_ = " ".join(_UpperCAmelCase )
UpperCAmelCase_ = word
return word
def lowercase__ ( self : Dict , _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = []
for token in re.findall(self.pat , _UpperCAmelCase ):
UpperCAmelCase_ = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(" " ) )
return bpe_tokens
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "".join(_UpperCAmelCase )
UpperCAmelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + "\n" )
UpperCAmelCase_ = 0
with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(" ".join(_UpperCAmelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowercase__ ( self : str , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
UpperCAmelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def lowercase__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()):
UpperCAmelCase_ = " " + text
return (text, kwargs)
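# --- Hedged sketch (not part of the original file) ---------------------------
# The `bpe` method in the class above repeatedly merges the lowest-ranked
# adjacent symbol pair until no known merge remains. Below is a minimal,
# self-contained illustration of that idea; the function name and the merge
# table in the example are invented for illustration, not the original API.
def _bpe_merge_sketch(word: str, merge_ranks: dict) -> list:
    """Greedily merge the best-ranked adjacent pair, as the tokenizer does."""
    symbols = list(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda pair: merge_ranks.get(pair, float("inf")))
        if best not in merge_ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols
# Example: _bpe_merge_sketch("hugging", {("g", "g"): 0}) -> ['h', 'u', 'gg', 'i', 'n', 'g']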
| 82 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Dict = False
def A_ ( _lowerCAmelCase : Namespace ):
"""simple docstring"""
return TrainCommand(_lowerCAmelCase )
class UpperCAmelCase__ ( A ):
@staticmethod
def lowerCamelCase_ ( __A : ArgumentParser ):
_lowerCamelCase : int = parser.add_parser("train",help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data",type=__A,required=__A,help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",)
train_parser.add_argument(
"--column_label",type=__A,default=0,help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text",type=__A,default=1,help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id",type=__A,default=2,help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row",action="store_true",help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data",type=__A,default="",help="path to validation dataset." )
train_parser.add_argument(
"--validation_split",type=__A,default=0.1,help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",)
        train_parser.add_argument("--output",type=__A,default="./",help="path to save the trained model." )
train_parser.add_argument(
"--task",type=__A,default="text_classification",help="Task to train the model on." )
train_parser.add_argument(
"--model",type=__A,default="bert-base-uncased",help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size",type=__A,default=3_2,help="Batch size for training." )
train_parser.add_argument("--valid_batch_size",type=__A,default=6_4,help="Batch size for validation." )
train_parser.add_argument("--learning_rate",type=__A,default=3e-5,help="Learning rate." )
train_parser.add_argument("--adam_epsilon",type=__A,default=1e-08,help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=__A )
def __init__( self : List[Any],__A : Namespace ):
_lowerCamelCase : str = logging.get_logger("transformers-cli/training" )
_lowerCamelCase : Tuple = "tf" if is_tf_available() else "torch"
os.makedirs(args.output,exist_ok=__A )
_lowerCamelCase : Dict = args.output
_lowerCamelCase : List[str] = args.column_label
_lowerCamelCase : Tuple = args.column_text
_lowerCamelCase : Tuple = args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
_lowerCamelCase : int = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
_lowerCamelCase : Optional[int] = Processor.create_from_csv(
args.train_data,column_label=args.column_label,column_text=args.column_text,column_id=args.column_id,skip_first_row=args.skip_first_row,)
_lowerCamelCase : Any = None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
_lowerCamelCase : int = Processor.create_from_csv(
args.validation_data,column_label=args.column_label,column_text=args.column_text,column_id=args.column_id,skip_first_row=args.skip_first_row,)
_lowerCamelCase : Union[str, Any] = args.validation_split
_lowerCamelCase : Any = args.train_batch_size
_lowerCamelCase : Optional[int] = args.valid_batch_size
_lowerCamelCase : List[Any] = args.learning_rate
_lowerCamelCase : Union[str, Any] = args.adam_epsilon
def lowerCamelCase_ ( self : List[str] ):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def lowerCamelCase_ ( self : int ):
raise NotImplementedError
def lowerCamelCase_ ( self : Optional[Any] ):
self.pipeline.fit(
self.train_dataset,validation_data=self.valid_dataset,validation_split=self.validation_split,learning_rate=self.learning_rate,adam_epsilon=self.adam_epsilon,train_batch_size=self.train_batch_size,valid_batch_size=self.valid_batch_size,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 44 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCamelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCamelCase = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTeX.
references: list of reference for each prediction. Each
reference is a string that contains natural language
        and LaTeX.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = 0.0
for i, j in zip(_UpperCAmelCase , _UpperCAmelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(_UpperCAmelCase , _UpperCAmelCase ) else 0.0
UpperCAmelCase_ = n_correct / len(_UpperCAmelCase )
return {
"accuracy": accuracy,
}
| 82 | 0 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> int:
if not nums:
return 0
UpperCamelCase__ :Tuple = nums[0]
UpperCamelCase__ :int = 0
for num in nums[1:]:
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = (
max_excluding + num,
max(lowercase__ , lowercase__ ),
)
return max(lowercase__ , lowercase__ )
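# --- Hedged note (not part of the original file) ------------------------------
# The loop above is the classic "maximum sum of non-adjacent elements" DP: it
# keeps two running values, the best sum that includes the previous element and
# the best sum that excludes it. Worked example for [3, 2, 7, 10]:
#   start:  including = 3,  excluding = 0
#   num=2:  including = 0 + 2 = 2,   excluding = max(3, 0) = 3
#   num=7:  including = 3 + 7 = 10,  excluding = max(2, 3) = 3
#   num=10: including = 3 + 10 = 13, excluding = max(10, 3) = 10
# answer: max(13, 10) = 13  (picking 3 and 10)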
if __name__ == "__main__":
import doctest
doctest.testmod() | 45 |
"""simple docstring"""
lowerCamelCase = """Alexander Joslin"""
import operator as op
from .stack import Stack
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
UpperCAmelCase_ = Stack()
UpperCAmelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowerCAmelCase__ ) )
elif i in operators:
# RULE 2
operator_stack.push(lowerCAmelCase__ )
elif i == ")":
# RULE 4
UpperCAmelCase_ = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operators[opr](lowerCAmelCase__ , lowerCAmelCase__ )
operand_stack.push(lowerCAmelCase__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
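# --- Hedged trace (not part of the original file) -----------------------------
# How the two stacks evaluate the expression above, assuming single-digit
# operands as the per-character parser requires: every ')' pops one operator
# and two operands and pushes the partial result back onto the operand stack.
#   first ')':  4 * 2  -> push 8
#   second ')': 2 + 3  -> push 5
#   third ')':  8 * 5  -> push 40
#   fourth ')': 5 + 40 -> push 45   (the final answer on top of the operand stack)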
| 82 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class A_ :
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
lowerCAmelCase__ = None
_lowerCAmelCase : List[str] = namedtuple('''CoinsDistribResult''', '''moves excess''')
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(_lowerCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_lowerCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_lowerCamelCase ) != count_coins(_lowerCamelCase ):
        raise ValueError("The number of nodes should be the same as the number of coins" )
# Main calculation
def get_distrib(_lowerCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_lowerCamelCase, _lowerCamelCase : Dict = get_distrib(node.left )
_lowerCamelCase, _lowerCamelCase : str = get_distrib(node.right )
_lowerCamelCase : Any = 1 - left_distrib_excess
_lowerCamelCase : Dict = 1 - right_distrib_excess
_lowerCamelCase : Union[str, Any] = (
left_distrib_moves
+ right_distrib_moves
+ abs(_lowerCamelCase )
+ abs(_lowerCamelCase )
)
_lowerCamelCase : str = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_lowerCamelCase , _lowerCamelCase )
return get_distrib(_lowerCamelCase )[0]
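# --- Hedged note (not part of the original file) ------------------------------
# Each node must end up with exactly one coin; a move passes a single coin
# across one edge, and the `excess` value tracks how many coins a subtree sends
# up to (or, if negative, needs from) its parent. Worked example: a root holding
# 3 coins with two empty leaf children needs 2 moves (one coin sent down each
# edge), which is what get_distrib reports for that tree.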
if __name__ == "__main__":
import doctest
doctest.testmod() | 46 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = int(number**0.5 )
return number == sq * sq
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCAmelCase_ = x_den * y_den * z_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
top //= hcf
bottom //= hcf
return top, bottom
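# --- Hedged worked example (not part of the original file) --------------------
# The helper above adds three fractions over a common denominator and reduces
# the result with gcd. For x = 1/2, y = 1/3, z = 1/6:
#   top    = 1*3*6 + 1*2*6 + 1*2*3 = 36
#   bottom = 2*3*6 = 36
#   gcd(36, 36) = 36, so the reduced sum is (1, 1), i.e. exactly 1.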
def a__ ( lowerCAmelCase__ = 35 ):
UpperCAmelCase_ = set()
UpperCAmelCase_ = 42
UpperCAmelCase_ = Fraction(0 )
UpperCAmelCase_ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCAmelCase_ = x_num * y_den + x_den * y_num
UpperCAmelCase_ = x_den * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCAmelCase_ = x_den * x_den * y_den * y_den
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=-1
UpperCAmelCase_ = x_num * y_num
UpperCAmelCase_ = x_den * y_num + x_num * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = x_num * x_num * y_num * y_num
UpperCAmelCase_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
for num, den in unique_s:
total += Fraction(lowerCAmelCase__ , lowerCAmelCase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
def UpperCAmelCase__ ( lowerCamelCase_ : type , lowerCamelCase_ : Optional[str] , lowerCamelCase_ : Optional[List[str]] = None , ):
__a : str = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
__a : Tuple = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
__a : Dict = format_type
def UpperCAmelCase__ ( lowerCamelCase_ : Exception , lowerCamelCase_ : Optional[str] , lowerCamelCase_ : Optional[List[str]] = None ):
__a : List[Any] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__a : int = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
SCREAMING_SNAKE_CASE__ = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
SCREAMING_SNAKE_CASE__ = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
SCREAMING_SNAKE_CASE__ = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[str] ):
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[str] , **lowerCamelCase_ : Union[str, Any] ):
__a : Optional[int] = get_format_type_from_alias(lowerCamelCase_ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**lowerCamelCase_ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
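# --- Hedged sketch (not part of the original module) --------------------------
# The helpers above implement a "registry with aliases": formatter classes are
# stored under a canonical format name and every alias maps back to that name.
# A minimal, self-contained version of the same pattern; every name below is
# illustrative only and is not the real datasets API.
_DEMO_FORMATTERS: dict = {}
_DEMO_ALIASES: dict = {}

def _demo_register(formatter_cls, name, aliases=()):
    """Store the class under `name` and point every alias at it."""
    _DEMO_FORMATTERS[name] = formatter_cls
    for alias in set(tuple(aliases) + (name,)):
        _DEMO_ALIASES[alias] = name

def _demo_get(name_or_alias, **kwargs):
    """Resolve an alias to its canonical name and instantiate the class."""
    canonical = _DEMO_ALIASES.get(name_or_alias, name_or_alias)
    if canonical not in _DEMO_FORMATTERS:
        raise ValueError(f"Unknown format type: {name_or_alias!r}")
    return _DEMO_FORMATTERS[canonical](**kwargs)
# Usage: _demo_register(dict, "mapping", aliases=("map",)); _demo_get("map") -> {}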
| 47 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
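# --- Hedged worked example (not part of the original file) --------------------
# The formula above is f = 1 / (2 * pi * sqrt(L * C)). For hypothetical values
# L = 10 mH and C = 100 nF: sqrt(0.01 * 1e-7) ~= 3.162e-5, so
# f ~= 1 / (2 * pi * 3.162e-5) ~= 5.03 kHz.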
| 82 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : Optional[Any] = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''vit'''
def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : int=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=16 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = encoder_stride
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 1e-4
| 82 | 0 |
"""simple docstring"""
def lowercase__ ( snake_case_ :float ):
return 10 - x * x
def lowercase__ ( snake_case_ :float , snake_case_ :float ):
    # Bolzano's theorem: a root lies between a and b only if equation(a) and equation(b) have opposite signs
if equation(snake_case_ ) * equation(snake_case_ ) >= 0:
raise ValueError('''Wrong space!''' )
__UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
__UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(snake_case_ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case_ ) * equation(snake_case_ ) < 0:
__UpperCAmelCase = c
else:
__UpperCAmelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
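# --- Hedged note (not part of the original file) ------------------------------
# The intended behaviour of the bisection above is to halve the bracket [a, b]
# until it is narrower than 0.01, keeping the half on which equation() changes
# sign. For equation(x) = 10 - x*x the positive root is sqrt(10) ~= 3.162, so
# both calls above should print a value close to 3.16.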
| 49 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : int=30 , _UpperCAmelCase : Tuple=400 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = size if size is not None else {"height": 20, "width": 20}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_convert_rgb
UpperCAmelCase_ = [512, 1024, 2048, 4096]
UpperCAmelCase_ = patch_size if patch_size is not None else {"height": 16, "width": 16}
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase_ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self )
@property
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ = 2048
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase_ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCAmelCase ):
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
UpperCAmelCase_ = "Hello"
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase_ = 3
@property
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 82 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : List[str] = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCAmelCase_ = BitConfig(
conv_layer=lowerCAmelCase__ , num_labels=1000 , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , )
return config
def a__ ( lowerCAmelCase__ ):
if "stem.conv" in name:
UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
UpperCAmelCase_ = "bit." + name
if "bit" not in name and "classifier" not in name:
UpperCAmelCase_ = "bit.encoder." + name
return name
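# --- Hedged examples (not part of the original file) --------------------------
# Following the renaming rules above, typical timm keys would map to HF keys
# like this (the key names themselves are illustrative):
#   "stem.conv.weight"      -> "bit.embedder.convolution.weight"
#   "blocks.0.conv1.weight" -> "bit.encoder.layers.0.conv1.weight"
#   "head.fc.weight"        -> "classifier.1.weight"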
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ = get_config(lowerCAmelCase__ )
# load original model from timm
UpperCAmelCase_ = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
# load state_dict of original model
UpperCAmelCase_ = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val.squeeze() if "head" in key else val
# load HuggingFace model
UpperCAmelCase_ = BitForImageClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(lowerCAmelCase__ ).unsqueeze(0 )
UpperCAmelCase_ = processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(lowerCAmelCase__ )
UpperCAmelCase_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCAmelCase_ = timm_model(lowerCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowerCamelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : int ):
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = BlipImageProcessor()
UpperCAmelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
UpperCAmelCase = BlipProcessor(a__ , a__ )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self : Dict , **a__ : Dict ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).tokenizer
def __snake_case ( self : Dict , **a__ : List[str] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def __snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : Tuple ):
UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self : List[str] ):
UpperCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCAmelCase = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
UpperCAmelCase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipProcessor(tokenizer=a__ , image_processor=a__ )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(a__ , return_tensors='''np''' )
UpperCAmelCase = processor(images=a__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self : Any ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipProcessor(tokenizer=a__ , image_processor=a__ )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = processor(text=a__ )
UpperCAmelCase = tokenizer(a__ , return_token_type_ids=a__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self : str ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipProcessor(tokenizer=a__ , image_processor=a__ )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(a__ ):
processor()
def __snake_case ( self : List[str] ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipProcessor(tokenizer=a__ , image_processor=a__ )
UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase = processor.batch_decode(a__ )
UpperCAmelCase = tokenizer.batch_decode(a__ )
self.assertListEqual(a__ , a__ )
def __snake_case ( self : int ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipProcessor(tokenizer=a__ , image_processor=a__ )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = processor(text=a__ , images=a__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 51 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = sorted(zip(lowerCAmelCase__ , lowerCAmelCase__ ) , key=lambda lowerCAmelCase__ : x[0] / x[1] , reverse=lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = [i[0] for i in r], [i[1] for i in r]
UpperCAmelCase_ = list(accumulate(lowerCAmelCase__ ) )
UpperCAmelCase_ = bisect(lowerCAmelCase__ , lowerCAmelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['''PerceiverFeatureExtractor''']
A = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 52 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCamelCase = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
lowerCamelCase = None
def a__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=lowerCAmelCase__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=lowerCAmelCase__ , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = bool(qa["answers"]["text"] )
return qid_to_has_ans
def a__ ( lowerCAmelCase__ ):
def remove_articles(lowerCAmelCase__ ):
return ARTICLES_REGEX.sub(" " , lowerCAmelCase__ )
def white_space_fix(lowerCAmelCase__ ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase__ ):
UpperCAmelCase_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase__ ) ) ) )
def a__ ( lowerCAmelCase__ ):
if not s:
return []
return normalize_answer(lowerCAmelCase__ ).split()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return int(normalize_answer(lowerCAmelCase__ ) == normalize_answer(lowerCAmelCase__ ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = collections.Counter(lowerCAmelCase__ ) & collections.Counter(lowerCAmelCase__ )
UpperCAmelCase_ = sum(common.values() )
if len(lowerCAmelCase__ ) == 0 or len(lowerCAmelCase__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = (2 * precision * recall) / (precision + recall)
return fa
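# --- Hedged worked example (not part of the original file) --------------------
# Token-level F1 as computed above, for gold "new york city" vs. prediction
# "new york": 2 tokens overlap, precision = 2/2 = 1.0, recall = 2/3, so
# F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.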
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = qa["id"]
UpperCAmelCase_ = [t for t in qa["answers"]["text"] if normalize_answer(lowerCAmelCase__ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
UpperCAmelCase_ = [""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
UpperCAmelCase_ = preds[qid]
# Take max over all gold answers
UpperCAmelCase_ = max(compute_exact(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
UpperCAmelCase_ = max(compute_fa(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
return exact_scores, fa_scores
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = {}
for qid, s in scores.items():
UpperCAmelCase_ = na_probs[qid] > na_prob_thresh
if pred_na:
UpperCAmelCase_ = float(not qid_to_has_ans[qid] )
else:
UpperCAmelCase_ = s
return new_scores
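# --- Hedged note (not part of the original file) ------------------------------
# If the model's no-answer probability for a question exceeds the threshold,
# the prediction is treated as "no answer": the stored score becomes 1.0 when
# the question truly has no answer and 0.0 when it does. For example, with
# na_prob = 0.9, threshold = 0.5 and an answerable question, the score is
# forced to 0.0 regardless of the predicted span.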
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
if not qid_list:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for k in new_eval:
UpperCAmelCase_ = new_eval[k]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
plt.step(lowerCAmelCase__ , lowerCAmelCase__ , color="b" , alpha=0.2 , where="post" )
plt.fill_between(lowerCAmelCase__ , lowerCAmelCase__ , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowerCAmelCase__ )
plt.savefig(lowerCAmelCase__ )
plt.clf()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ):
UpperCAmelCase_ = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : na_probs[k] )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 1.0
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = [1.0]
UpperCAmelCase_ = [0.0]
UpperCAmelCase_ = 0.0
for i, qid in enumerate(lowerCAmelCase__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
UpperCAmelCase_ = true_pos / float(i + 1 )
UpperCAmelCase_ = true_pos / float(lowerCAmelCase__ )
if i == len(lowerCAmelCase__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCAmelCase__ )
recalls.append(lowerCAmelCase__ )
if out_image:
plot_pr_curve(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return {"ap": 100.0 * avg_prec}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if out_image_dir and not os.path.exists(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
UpperCAmelCase_ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
UpperCAmelCase_ = {k: float(lowerCAmelCase__ ) for k, v in qid_to_has_ans.items()}
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_exact" )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_f1" )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_oracle" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if not qid_list:
return
UpperCAmelCase_ = [na_probs[k] for k in qid_list]
UpperCAmelCase_ = np.ones_like(lowerCAmelCase__ ) / float(len(lowerCAmelCase__ ) )
plt.hist(lowerCAmelCase__ , weights=lowerCAmelCase__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(lowerCAmelCase__ , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCAmelCase_ = num_no_ans
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : na_probs[k] )
for i, qid in enumerate(lowerCAmelCase__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCAmelCase_ = scores[qid]
else:
if preds[qid]:
UpperCAmelCase_ = -1
else:
UpperCAmelCase_ = 0
cur_score += diff
if cur_score > best_score:
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = na_probs[qid]
return 100.0 * best_score / len(lowerCAmelCase__ ), best_thresh
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = best_exact
UpperCAmelCase_ = exact_thresh
UpperCAmelCase_ = best_fa
UpperCAmelCase_ = fa_thresh
def a__ ( ):
with open(OPTS.data_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
UpperCAmelCase_ = dataset_json["data"]
with open(OPTS.pred_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
else:
UpperCAmelCase_ = {k: 0.0 for k in preds}
UpperCAmelCase_ = make_qid_to_has_ans(lowerCAmelCase__ ) # maps qid to True/False
UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if v]
UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if not v]
UpperCAmelCase_ , UpperCAmelCase_ = get_raw_scores(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
UpperCAmelCase_ = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ )
if has_ans_qids:
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "HasAns" )
if no_ans_qids:
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir )
histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
else:
print(json.dumps(lowerCAmelCase__ , indent=2 ) )
if __name__ == "__main__":
lowerCamelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 82 | 0 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : List[str] ) -> str:
__lowerCAmelCase = 'hf-internal-testing/tiny-random-t5'
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer('This is me' , return_tensors='pt' )
__lowerCAmelCase = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__lowerCAmelCase = model.generate(**lowerCAmelCase_ )
__lowerCAmelCase = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__lowerCAmelCase = model_reloaded.generate(**lowerCAmelCase_ )
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ ) )
def lowercase ( self : int ) -> Dict:
__lowerCAmelCase = 'hf-internal-testing/tiny-random-t5'
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowerCAmelCase_ ):
model.save_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = model.reverse_bettertransformer()
model.save_pretrained(lowerCAmelCase_ )
| 53 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float(moles / volume ) * nfactor )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
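# Worked example for clarity (not part of the original module): with the ideal gas law
# P * V = n * R * T and R = 0.0821 L*atm/(mol*K), one mole of gas at 300 K in a
# 24.63 L vessel exerts P = (1 * 0.0821 * 300) / 24.63 = 1 atm, which the pressure
# helper above rounds to 1.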
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 54 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
lowerCamelCase = 6_378_137.0
lowerCamelCase = 6_356_752.314_245
lowerCamelCase = 6_378_137
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
UpperCAmelCase_ = haversine_distance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
UpperCAmelCase_ = (b_lata + b_lata) / 2
UpperCAmelCase_ = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
UpperCAmelCase_ = (sin(lowerCAmelCase__ ) ** 2) * (cos(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = cos(sigma / 2 ) ** 2
UpperCAmelCase_ = (sigma - sin(lowerCAmelCase__ )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
UpperCAmelCase_ = (cos(lowerCAmelCase__ ) ** 2) * (sin(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = sin(sigma / 2 ) ** 2
UpperCAmelCase_ = (sigma + sin(lowerCAmelCase__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
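# Note for clarity (not part of the original module): Lambert's correction adjusts the
# spherical haversine distance for the Earth's oblateness. With the axes defined above,
# flattening = (AXIS_A - AXIS_B) / AXIS_A is about 0.0033528 (roughly 1/298.257), so the
# (flattening / 2) * (X + Y) term changes the result by at most a fraction of a percent
# of the distance.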
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working, simple example of using Accelerate,
# specifically showcasing how to properly calculate metrics on the
# validation dataset in a distributed setup; it builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
SCREAMING_SNAKE_CASE :Tuple = 16
SCREAMING_SNAKE_CASE :List[str] = 32
def UpperCAmelCase ( a_ , a_ = 1_6 ) -> Union[str, Any]:
"""simple docstring"""
__A = AutoTokenizer.from_pretrained("bert-base-cased" )
__A = load_dataset("glue" , "mrpc" )
def tokenize_function(a_ ):
# max_length=None => use the model max length (it's actually the default)
__A = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a_ , max_length=a_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__A = datasets.map(
a_ , batched=a_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__A = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(a_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__A = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__A = 1_6
elif accelerator.mixed_precision != "no":
__A = 8
else:
__A = None
return tokenizer.pad(
a_ , padding="longest" , max_length=a_ , pad_to_multiple_of=a_ , return_tensors="pt" , )
# Instantiate dataloaders.
__A = DataLoader(
tokenized_datasets["train"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
__A = DataLoader(
tokenized_datasets["validation"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
SCREAMING_SNAKE_CASE :str = mocked_dataloaders # noqa: F811
def UpperCAmelCase ( a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , a_ ) == "1":
__A = 2
# Initialize accelerator
__A = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__A = config["lr"]
__A = int(config["num_epochs"] )
__A = int(config["seed"] )
__A = int(config["batch_size"] )
__A = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
__A = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__A = batch_size // MAX_GPU_BATCH_SIZE
__A = MAX_GPU_BATCH_SIZE
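# Illustration (not part of the original script): requesting batch_size = 64 with
# MAX_GPU_BATCH_SIZE = 16 sets gradient_accumulation_steps = 64 // 16 = 4 and a
# per-step batch of 16, so the effective batch size seen by the optimizer stays 64.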
set_seed(a_ )
__A , __A = get_dataloaders(a_ , a_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__A = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=a_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__A = model.to(accelerator.device )
# Instantiate optimizer
__A = AdamW(params=model.parameters() , lr=a_ )
# Instantiate scheduler
__A = get_linear_schedule_with_warmup(
optimizer=a_ , num_warmup_steps=1_0_0 , num_training_steps=(len(a_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__A , __A , __A , __A , __A = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ )
# Now we train the model
for epoch in range(a_ ):
model.train()
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__A = model(**a_ )
__A = outputs.loss
__A = loss / gradient_accumulation_steps
accelerator.backward(a_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
__A = 0
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__A = model(**a_ )
__A = outputs.logits.argmax(dim=-1 )
__A , __A = accelerator.gather((predictions, batch["labels"]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(a_ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
__A = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__A = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
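# Illustration (not part of the original example): suppose the validation set holds 97
# samples and evaluation runs on 4 processes. The sharded dataloader pads the dataset to
# 100 so each process sees 25 samples; after `gather` the final batch therefore contains
# 3 duplicated samples, and the truncation above keeps exactly
# len(eval_dataloader.dataset) == 97 predictions and references for the metric.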
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=a_ , references=a_ , )
__A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , a_ )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
__A = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=a_ , default=a_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
__A = parser.parse_args()
__A = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(a_ , a_ )
if __name__ == "__main__":
main()
| 55 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Dict=36 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[Any]=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = 300
return config
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = self.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase__ ( self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = MraModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> int:
'''simple docstring'''
UpperCAmelCase_ = True
UpperCAmelCase_ = MraModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = MraForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = ()
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MraModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MraModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip(reason="MRA does not output attentions" )
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
UpperCAmelCase_ = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 82 | 0 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_a : Optional[Any] = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Path , SCREAMING_SNAKE_CASE_ : Union[str, None] = None , SCREAMING_SNAKE_CASE_ : Union[List[str], None] = None , SCREAMING_SNAKE_CASE_ : Union[str, List[str], None] = None , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Any:
__snake_case = [file for file in os.listdir(SCREAMING_SNAKE_CASE_ ) if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py' )
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , SCREAMING_SNAKE_CASE_ )
if only_modules:
__snake_case = file.split('.' )[0]
try:
__snake_case = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = doctest.DocTestSuite(SCREAMING_SNAKE_CASE_ )
__snake_case = unittest.TextTestRunner().run(SCREAMING_SNAKE_CASE_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'{module_identifier} is not a module.' )
else:
__snake_case = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def a ( self : List[Any] ) -> int:
__snake_case = Path('src/transformers' )
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(SCREAMING_SNAKE_CASE_ , identifier=SCREAMING_SNAKE_CASE_ , ignore_files=SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] ) -> str:
__snake_case = Path('src/transformers' )
__snake_case = 'tokenization'
self.analyze_directory(SCREAMING_SNAKE_CASE_ , identifier=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = Path('src/transformers' )
__snake_case = 'configuration'
self.analyze_directory(SCREAMING_SNAKE_CASE_ , identifier=SCREAMING_SNAKE_CASE_ )
def a ( self : List[Any] ) -> int:
__snake_case = Path('src/transformers' )
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(SCREAMING_SNAKE_CASE_ , n_identifier=SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Tuple:
__snake_case = Path('docs/source' )
__snake_case = ['favicon.ico']
self.analyze_directory(SCREAMING_SNAKE_CASE_ , ignore_files=SCREAMING_SNAKE_CASE_ , only_modules=SCREAMING_SNAKE_CASE_ )
| 56 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase = 50_000
lowerCamelCase = 5_000
lowerCamelCase , lowerCamelCase = os.path.split(__file__)
lowerCamelCase = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
def a__ ( ):
UpperCAmelCase_ = {"num examples": SPEED_TEST_N_EXAMPLES}
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
UpperCAmelCase_ = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
UpperCAmelCase_ = generate_example_dataset(
os.path.join(lowerCAmelCase__ , "dataset.arrow" ) , lowerCAmelCase__ , num_examples=lowerCAmelCase__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(lowerCAmelCase__ , **lowerCAmelCase__ )
print("shuffling dataset" )
UpperCAmelCase_ = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(
lowerCAmelCase__ , **lowerCAmelCase__ )
with open(lowerCAmelCase__ , "wb" ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 82 | 0 |
A_ : Dict = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
A_ : Optional[Any] = ['a', 'b', 'c', 'd', 'e']
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> str:
UpperCamelCase_: int = start
# add current to visited
visited.append(UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
UpperCamelCase_: Optional[Any] = topological_sort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# if all neighbors visited add current to sort
sort.append(UpperCAmelCase__ )
# if all vertices haven't been visited select a new one to visit
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
for vertice in vertices:
if vertice not in visited:
UpperCamelCase_: Union[str, Any] = topological_sort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# return sort
return sort
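# Note (not part of the original module): for the example graph defined above, a
# depth-first run starting from "a" returns ['c', 'd', 'e', 'b', 'a']; read in reverse
# this is a valid topological order, since every vertex appears after all vertices
# reachable from it.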
if __name__ == "__main__":
A_ : Dict = topological_sort('a', [], [])
print(sort) | 57 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''image''': Image()} )
UpperCamelCase = Features({'''labels''': ClassLabel} )
UpperCamelCase = "image"
UpperCamelCase = "labels"
def lowercase__ ( self : str , _UpperCAmelCase : str ) -> Dict:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , _UpperCAmelCase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
UpperCAmelCase_ = copy.deepcopy(self )
UpperCAmelCase_ = self.label_schema.copy()
UpperCAmelCase_ = features[self.label_column]
UpperCAmelCase_ = label_schema
return task_template
@property
def lowercase__ ( self : List[str] ) -> Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 82 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Any = a.name
snake_case_ : Optional[Any] = b.name
snake_case_ : Dict = """"""
snake_case_ : Optional[Any] = """"""
snake_case_ : Any = a == b
snake_case_ : Optional[int] = name_a
snake_case_ : Optional[Any] = name_b
return res
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__UpperCamelCase , __UpperCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __UpperCamelCase , __UpperCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , __UpperCamelCase , __UpperCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple ):
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = list(model.graph.initializer )
snake_case_ : int = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ : Union[str, Any] = inits[i].name
snake_case_ : Union[str, Any] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[str] = os.path.dirname(__UpperCamelCase )
snake_case_ : Tuple = os.path.basename(__UpperCamelCase )
snake_case_ : str = onnx.load(os.path.join(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ : Optional[Any] = list(model.graph.initializer )
snake_case_ : str = set()
snake_case_ : int = {}
snake_case_ : Tuple = []
snake_case_ : Union[str, Any] = 0
for i in range(len(__UpperCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(__UpperCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(__UpperCamelCase )
dup_set.add(__UpperCamelCase )
snake_case_ : Optional[Any] = inits[j].data_type
snake_case_ : Tuple = numpy.prod(inits[j].dims )
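# Note (not part of the original script): the dtype codes checked below follow
# onnx.TensorProto.DataType, where 1 is FLOAT and 6 is INT32 (4 bytes per element),
# and 7 is INT64 and 11 is DOUBLE (8 bytes per element), hence the 4x / 8x multipliers.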
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print("""unexpected data type: """ , __UpperCamelCase )
total_reduced_size += mem_size
snake_case_ : Union[str, Any] = inits[i].name
snake_case_ : int = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__UpperCamelCase )
else:
snake_case_ : Any = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , """GB""" )
snake_case_ : Dict = sorted(__UpperCamelCase )
_remove_dup_initializers_from_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Optional[int] = """optimized_""" + model_file_name
snake_case_ : Dict = os.path.join(__UpperCamelCase , __UpperCamelCase )
onnx.save(__UpperCamelCase , __UpperCamelCase )
return new_model
| 58 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = generator.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 82 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = "▁"
__A = {"vocab_file": "sentencepiece.bpe.model"}
__A = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
__A = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
__A = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = ["input_ids", "attention_mask"]
lowercase_ = []
lowercase_ = []
def __init__(self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int]="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Union[str, Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Union[str, Any]="<pad>" , UpperCAmelCase_ : Union[str, Any]="<mask>" , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Dict=None , **UpperCAmelCase_ : Dict , ) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
lowerCamelCase__: str ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
lowerCamelCase__: Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(UpperCAmelCase_))
lowerCamelCase__: int =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
lowerCamelCase__: List[str] ={"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase__: Optional[int] =1
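# Illustration (not part of the original tokenizer): with a fairseq offset of 1, a
# sentencepiece id is shifted up by one when mapped to a model id, e.g. spm id 3 (",")
# becomes fairseq id 4 as in the alignment table above, while "<s>", "<pad>", "</s>"
# and "<unk>" are served from the fairseq token-to-id mapping instead.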
lowerCamelCase__: Optional[Any] =len(self.sp_model)
lowerCamelCase__: int ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCAmelCase_)
}
lowerCamelCase__: int ={v: k for k, v in self.lang_code_to_id.items()}
lowerCamelCase__: str =len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
lowerCamelCase__: Optional[int] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCamelCase__: Optional[Any] =list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
lowerCamelCase__: Optional[int] =src_lang if src_lang is not None else "en_XX"
lowerCamelCase__: int =self.lang_code_to_id[self._src_lang]
lowerCamelCase__: Dict =tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__(self : str) ->Any:
'''simple docstring'''
lowerCamelCase__: str =self.__dict__.copy()
lowerCamelCase__: Optional[Any] =None
lowerCamelCase__: List[Any] =self.sp_model.serialized_model_proto()
return state
def __setstate__(self : List[Any] , UpperCAmelCase_ : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
lowerCamelCase__: Dict ={}
lowerCamelCase__: List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : str) ->None:
'''simple docstring'''
lowerCamelCase__: List[str] =new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Any =[1] * len(self.prefix_tokens)
lowerCamelCase__: Tuple =[1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase_)) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase_)) + ([0] * len(UpperCAmelCase_)) + suffix_ones
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None) ->List[int]:
'''simple docstring'''
lowerCamelCase__: int =[self.sep_token_id]
lowerCamelCase__: int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] , UpperCAmelCase_ : Optional[str] , **UpperCAmelCase_ : Any) ->Tuple:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
lowerCamelCase__: Any =src_lang
lowerCamelCase__: str =self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =self.convert_tokens_to_ids(UpperCAmelCase_)
lowerCamelCase__: List[Any] =tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str ={self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : str) ->List[str]:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[str]) ->Dict:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase__: Any =self.sp_model.PieceToId(UpperCAmelCase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Dict) ->Optional[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] ="".join(UpperCAmelCase_).replace(UpperCAmelCase_ , " ").strip()
return out_string
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
lowerCamelCase__: str =os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase_ , "wb") as fi:
lowerCamelCase__: Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_)
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str = "en_XX" , UpperCAmelCase_ : Optional[List[str]] = None , UpperCAmelCase_ : str = "ro_RO" , **UpperCAmelCase_ : List[str] , ) ->BatchEncoding:
'''simple docstring'''
lowerCamelCase__: Optional[int] =src_lang
lowerCamelCase__: Optional[Any] =tgt_lang
return super().prepare_seq2seq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang)
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : str) ->None:
'''simple docstring'''
lowerCamelCase__: Any =self.lang_code_to_id[src_lang]
lowerCamelCase__: Tuple =[]
lowerCamelCase__: Union[str, Any] =[self.eos_token_id, self.cur_lang_code]
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : str) ->None:
'''simple docstring'''
lowerCamelCase__: str =self.lang_code_to_id[lang]
lowerCamelCase__: int =[]
lowerCamelCase__: int =[self.eos_token_id, self.cur_lang_code]
| 59 |
"""simple docstring"""
def greatest_common_divisor( x , y ):
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm( x , y ):
    return (x * y) // greatest_common_divisor(x , y )
def solution( n = 20 ):
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 0 |
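# Illustrative aside (not part of the dataset rows around it): a self-contained check of the
# folded-LCM idea the snippet above implements for Project Euler problem 5. Only the standard
# library is assumed; the helper name `smallest_multiple` is hypothetical.
from math import gcd

def smallest_multiple(n: int = 20) -> int:
    result = 1
    for i in range(1, n + 1):
        result = result * i // gcd(result, i)  # lcm(result, i)
    return result

assert smallest_multiple(10) == 2520   # the worked example usually quoted for 1..10
print(smallest_multiple(20))           # 232792560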
import requests
def send_slack_message( message_body , slack_url ) -> None:
    """simple docstring"""
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url , json={'''text''': message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            '''Request to slack returned an error '''
            f'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 60 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCAmelCase_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
else:
UpperCAmelCase_ = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = ProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
UpperCAmelCase_ = ["key_proj", "value_proj", "query_proj"]
UpperCAmelCase_ = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
UpperCAmelCase_ = key.split("." )
if attributes[0] == "lm_head":
UpperCAmelCase_ = prophet
UpperCAmelCase_ = prophet_old
else:
UpperCAmelCase_ = prophet.prophetnet
UpperCAmelCase_ = prophet_old.model
UpperCAmelCase_ = False
for attribute in attributes:
if attribute in mapping:
UpperCAmelCase_ = mapping[attribute]
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = attribute
elif hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
UpperCAmelCase_ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.bias
logger.info(f"""{attribute} is initialized""" )
UpperCAmelCase_ = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase__ , "in_proj_weight" ):
UpperCAmelCase_ = old_model.in_proj_weight.shape[0] // 3
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
UpperCAmelCase_ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
UpperCAmelCase_ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
UpperCAmelCase_ = True
break
if attribute.isdigit():
UpperCAmelCase_ = model[int(lowerCAmelCase__ )]
UpperCAmelCase_ = old_model[int(lowerCAmelCase__ )]
else:
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if old_attribute == "":
UpperCAmelCase_ = old_model
else:
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 82 | 0 |
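# Minimal sketch of the projection split performed in the conversion script above: legacy
# attention modules store query/key/value as a single fused `in_proj_weight` of shape
# (3 * embed_dim, embed_dim), and the new per-projection layers receive three equal slices.
# Toy tensors only; no checkpoint or model names are assumed, just `torch`.
import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
in_proj_bias = torch.randn(3 * embed_dim)

q_weight = in_proj_weight[:embed_dim, :]                  # query slice
k_weight = in_proj_weight[embed_dim : 2 * embed_dim, :]   # key slice
v_weight = in_proj_weight[2 * embed_dim :, :]             # value slice
q_bias, k_bias, v_bias = in_proj_bias.split(embed_dim)

assert q_weight.shape == k_weight.shape == v_weight.shape == (embed_dim, embed_dim)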
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple=13 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=32 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Tuple=16 , SCREAMING_SNAKE_CASE__ : Optional[Any]=[32, 64, 128] , SCREAMING_SNAKE_CASE__ : Optional[Any]=[1, 2, 1] , SCREAMING_SNAKE_CASE__ : Optional[int]=[2, 2, 4] , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : List[Any]=2.0 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Dict=1e-5 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=10 , SCREAMING_SNAKE_CASE__ : Optional[int]=8 , SCREAMING_SNAKE_CASE__ : Tuple=["stage1", "stage2"] , SCREAMING_SNAKE_CASE__ : Dict=[1, 2] , ) -> Any:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = embed_dim
lowerCAmelCase__ = hidden_sizes
lowerCAmelCase__ = depths
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = window_size
lowerCAmelCase__ = mlp_ratio
lowerCAmelCase__ = qkv_bias
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = drop_path_rate
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = use_absolute_embeddings
lowerCAmelCase__ = patch_norm
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = is_training
lowerCAmelCase__ = scope
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = encoder_stride
lowerCAmelCase__ = out_features
lowerCAmelCase__ = out_indices
def a ( self : int ) -> Any:
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def a ( self : int ) -> int:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
lowerCAmelCase__ = FocalNetModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any:
lowerCAmelCase__ = FocalNetBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCAmelCase__ = None
lowerCAmelCase__ = FocalNetBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
lowerCAmelCase__ = FocalNetForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = FocalNetForMaskedImageModeling(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = FocalNetForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = FocalNetForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a ( self : Union[str, Any] ) -> Any:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def a ( self : Optional[Any] ) -> Dict:
lowerCAmelCase__ = FocalNetModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , embed_dim=37 , has_text_modality=SCREAMING_SNAKE_CASE__ )
def a ( self : Dict ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a ( self : int ) -> Tuple:
return
def a ( self : int ) -> Any:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def a ( self : Any ) -> List[str]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE__ )
def a ( self : List[Any] ) -> int:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[int] ) -> Tuple:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def a ( self : List[Any] ) -> List[Any]:
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def a ( self : int ) -> Dict:
pass
def a ( self : List[str] ) -> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def a ( self : List[str] ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> str:
lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# FocalNet has a different seq_length
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCAmelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = reshaped_hidden_states[0].shape
lowerCAmelCase__ = (
reshaped_hidden_states[0].view(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a ( self : Union[str, Any] ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase__ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def a ( self : Dict ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = 3
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase__ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , (padded_height, padded_width) )
@slow
def a ( self : Optional[int] ) -> Any:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = FocalNetModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def a ( self : str ) -> Optional[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = _config_zero_init(SCREAMING_SNAKE_CASE__ )
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(config=SCREAMING_SNAKE_CASE__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a ( self : Optional[Any] ) -> Optional[int]:
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def a ( self : int ) -> Union[str, Any]:
lowerCAmelCase__ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowerCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.tensor([0.2_166, -0.4_368, 0.2_191] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (FocalNetBackbone,) if is_torch_available() else ()
snake_case__ = FocalNetConfig
snake_case__ = False
def a ( self : int ) -> Union[str, Any]:
lowerCAmelCase__ = FocalNetModelTester(self )
| 61 |
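# Arithmetic aside on the expected-shape formula used by the model tester above: hierarchical
# backbones halve the spatial resolution per extra stage, so the token count shrinks by 4x per
# stage while the channel width doubles. The numbers mirror the tester defaults (image_size=32,
# patch_size=2, embed_dim=16, depths=[1, 2, 1]); they are illustrative assumptions, not values
# read from any checkpoint.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
print(expected_seq_len, expected_dim)  # 16 64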
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(lowerCAmelCase__ )
for i in range(n - 1 ):
for j in range(i + 1 , lowerCAmelCase__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def a__ ( lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) <= 1:
return arr, 0
UpperCAmelCase_ = len(lowerCAmelCase__ ) // 2
UpperCAmelCase_ = arr[0:mid]
UpperCAmelCase_ = arr[mid:]
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = _count_cross_inversions(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = []
UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = 0
while i < len(lowerCAmelCase__ ) and j < len(lowerCAmelCase__ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(lowerCAmelCase__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowerCAmelCase__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def a__ ( ):
UpperCAmelCase_ = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , lowerCAmelCase__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowerCAmelCase__ )
# an empty list should also have zero inversions
UpperCAmelCase_ = []
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 82 | 0 |
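# Self-contained sketch of inversion counting, kept independent of the snippet above so it runs
# on its own: the O(n^2) brute force is the reference that a merge-sort based counter is usually
# validated against.
def count_inversions(arr):
    return sum(
        1
        for i in range(len(arr) - 1)
        for j in range(i + 1, len(arr))
        if arr[i] > arr[j]
    )

assert count_inversions([10, 2, 1, 5, 5, 2, 11]) == 8
assert count_inversions(sorted([10, 2, 1, 5, 5, 2, 11])) == 0
assert count_inversions([]) == 0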
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = '''transfo-xl'''
UpperCamelCase_ : List[str] = ['''mems''']
UpperCamelCase_ : Optional[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Tuple , UpperCAmelCase_ : List[str]=26_7735 , UpperCAmelCase_ : str=[2_0000, 4_0000, 20_0000] , UpperCAmelCase_ : Optional[int]=1024 , UpperCAmelCase_ : int=1024 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : int=64 , UpperCAmelCase_ : Union[str, Any]=4096 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : List[str]=18 , UpperCAmelCase_ : List[str]=1600 , UpperCAmelCase_ : Optional[Any]=1000 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : int=-1 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Union[str, Any]="normal" , UpperCAmelCase_ : Optional[Any]=0.01 , UpperCAmelCase_ : Any=0.01 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Dict=1E-5 , UpperCAmelCase_ : int=0 , **UpperCAmelCase_ : List[Any] , ):
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = []
self.cutoffs.extend(UpperCAmelCase_ )
if proj_share_all_but_first:
SCREAMING_SNAKE_CASE : List[Any] = [False] + [True] * len(self.cutoffs )
else:
SCREAMING_SNAKE_CASE : Dict = [False] + [False] * len(self.cutoffs )
SCREAMING_SNAKE_CASE : Dict = d_model
SCREAMING_SNAKE_CASE : Optional[int] = d_embed
SCREAMING_SNAKE_CASE : Optional[Any] = d_head
SCREAMING_SNAKE_CASE : Any = d_inner
SCREAMING_SNAKE_CASE : str = div_val
SCREAMING_SNAKE_CASE : str = pre_lnorm
SCREAMING_SNAKE_CASE : Optional[int] = n_layer
SCREAMING_SNAKE_CASE : Optional[int] = n_head
SCREAMING_SNAKE_CASE : List[str] = mem_len
SCREAMING_SNAKE_CASE : Optional[int] = same_length
SCREAMING_SNAKE_CASE : Optional[int] = attn_type
SCREAMING_SNAKE_CASE : List[Any] = clamp_len
SCREAMING_SNAKE_CASE : Dict = sample_softmax
SCREAMING_SNAKE_CASE : List[str] = adaptive
SCREAMING_SNAKE_CASE : Union[str, Any] = dropout
SCREAMING_SNAKE_CASE : Optional[int] = dropatt
SCREAMING_SNAKE_CASE : str = untie_r
SCREAMING_SNAKE_CASE : str = init
SCREAMING_SNAKE_CASE : List[Any] = init_range
SCREAMING_SNAKE_CASE : Union[str, Any] = proj_init_std
SCREAMING_SNAKE_CASE : Optional[Any] = init_std
SCREAMING_SNAKE_CASE : Any = layer_norm_epsilon
super().__init__(eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def _A ( self : Any ):
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def _A ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 62 |
"""simple docstring"""
def a__ ( number ):
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
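# Quick self-contained cross-check of the two's-complement construction above: for a negative
# integer n, the minimal-width representation equals n modulo 2**k, where k is |n|'s bit length
# plus one sign bit. Local names only; standard library only.
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    if number == 0:
        return "0b0"
    width = number.bit_length() + 1  # one extra bit for the leading sign bit
    return "0b" + format(number % (1 << width), f"0{width}b")

assert twos_complement(-1) == "0b11"
assert twos_complement(-5) == "0b1011"
assert twos_complement(-17) == "0b101111"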
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
a : str = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , *__lowercase : Tuple , **__lowercase : str ) -> None:
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , __lowercase , )
super().__init__(*__lowercase , **__lowercase )
| 63 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] , **_UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , "vision" )
self.check_model_type(_UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , _UpperCAmelCase : Union[str, List[str]] = None , **_UpperCAmelCase : Optional[int] , ) -> List[Any]:
'''simple docstring'''
if "text_queries" in kwargs:
UpperCAmelCase_ = kwargs.pop("text_queries" )
if isinstance(_UpperCAmelCase , (str, Image.Image) ):
UpperCAmelCase_ = {"image": image, "candidate_labels": candidate_labels}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
return results
def lowercase__ ( self : str , **_UpperCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {}
if "threshold" in kwargs:
UpperCAmelCase_ = kwargs["threshold"]
if "top_k" in kwargs:
UpperCAmelCase_ = kwargs["top_k"]
return {}, {}, postprocess_params
def lowercase__ ( self : int , _UpperCAmelCase : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = load_image(inputs["image"] )
UpperCAmelCase_ = inputs["candidate_labels"]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase_ = candidate_labels.split("," )
UpperCAmelCase_ = torch.tensor([[image.height, image.width]] , dtype=torch.int64 )
for i, candidate_label in enumerate(_UpperCAmelCase ):
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.image_processor(_UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(_UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowercase__ ( self : int , _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = model_inputs.pop("target_size" )
UpperCAmelCase_ = model_inputs.pop("candidate_label" )
UpperCAmelCase_ = model_inputs.pop("is_last" )
UpperCAmelCase_ = self.model(**_UpperCAmelCase )
UpperCAmelCase_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def lowercase__ ( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : List[str]=None ) -> int:
'''simple docstring'''
UpperCAmelCase_ = []
for model_output in model_outputs:
UpperCAmelCase_ = model_output["candidate_label"]
UpperCAmelCase_ = BaseModelOutput(_UpperCAmelCase )
UpperCAmelCase_ = self.image_processor.post_process_object_detection(
outputs=_UpperCAmelCase , threshold=_UpperCAmelCase , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
UpperCAmelCase_ = outputs["scores"][index].item()
UpperCAmelCase_ = self._get_bounding_box(outputs["boxes"][index][0] )
UpperCAmelCase_ = {"score": score, "label": label, "box": box}
results.append(_UpperCAmelCase )
UpperCAmelCase_ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )
if top_k:
UpperCAmelCase_ = results[:top_k]
return results
def lowercase__ ( self : str , _UpperCAmelCase : "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = box.int().tolist()
UpperCAmelCase_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| 82 | 0 |
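# Hedged usage sketch for the zero-shot object-detection pipeline defined above. The task string
# and the OWL-ViT checkpoint are taken from the public transformers documentation, not from this
# file, and downloading the model plus the COCO sample image requires network access.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
    threshold=0.1,
)
for prediction in predictions:
    # each entry carries a score, a label and an xmin/ymin/xmax/ymax box, as built in _get_bounding_box
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])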
from __future__ import annotations
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: int= TypeError(
'''Matrices must be formed from a list of zero or more lists containing at '''
'''least one and the same number of values, each of which must be of type '''
'''int or float.''' )
if len(lowerCAmelCase ) != 0:
SCREAMING_SNAKE_CASE__: List[str]= len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(lowerCAmelCase ) != cols:
raise error
for value in row:
if not isinstance(lowerCAmelCase , (int, float) ):
raise error
SCREAMING_SNAKE_CASE__: Optional[Any]= rows
else:
SCREAMING_SNAKE_CASE__: List[str]= []
def UpperCamelCase_ ( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def UpperCamelCase_ ( self ) -> int:
return len(self.rows )
@property
def UpperCamelCase_ ( self ) -> int:
return len(self.rows[0] )
@property
def UpperCamelCase_ ( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def UpperCamelCase_ ( self ) -> bool:
return self.order[0] == self.order[1]
def UpperCamelCase_ ( self ) -> Matrix:
SCREAMING_SNAKE_CASE__: Optional[int]= [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def UpperCamelCase_ ( self ) -> bool:
return bool(self.determinant() )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__: Tuple= [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowerCAmelCase ).determinant()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(lowerCAmelCase , lowerCAmelCase )
return -1 * self.get_minor(lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Matrix:
return Matrix(
[
[self.get_minor(lowerCAmelCase , lowerCAmelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def UpperCamelCase_ ( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def UpperCamelCase_ ( self ) -> Matrix:
SCREAMING_SNAKE_CASE__: int= [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Matrix:
SCREAMING_SNAKE_CASE__: int= self.determinant()
if not determinant:
raise TypeError('''Only matrices with a non-zero determinant have an inverse''' )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'''[''' + '''. '''.join([str(lowerCAmelCase ) for value in row] ) + '''.]'''
for row in self.rows
] )
+ "]"
)
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> None:
SCREAMING_SNAKE_CASE__: Any= TypeError('''Row must be a list containing all ints and/or floats''' )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise type_error
for value in row:
if not isinstance(lowerCAmelCase , (int, float) ):
raise type_error
if len(lowerCAmelCase ) != self.num_columns:
raise ValueError(
'''Row must be equal in length to the other rows in the matrix''' )
if position is None:
self.rows.append(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.rows[0:position] + [row] + self.rows[position:]
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> None:
SCREAMING_SNAKE_CASE__: int= TypeError(
'''Column must be a list containing all ints and/or floats''' )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise type_error
for value in column:
if not isinstance(lowerCAmelCase , (int, float) ):
raise type_error
if len(lowerCAmelCase ) != self.num_rows:
raise ValueError(
'''Column must be equal in length to the other columns in the matrix''' )
if position is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
SCREAMING_SNAKE_CASE__: List[str]= [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , lowerCAmelCase ) -> bool:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , lowerCAmelCase ) -> bool:
return not self == other
def __neg__( self ) -> Matrix:
return self * -1
def __add__( self , lowerCAmelCase ) -> Matrix:
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , lowerCAmelCase ) -> Matrix:
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , lowerCAmelCase ) -> Matrix:
if isinstance(lowerCAmelCase , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
if self.num_columns != other.num_rows:
raise ValueError(
'''The number of columns in the first matrix must '''
'''be equal to the number of rows in the second''' )
return Matrix(
[
[Matrix.dot_product(lowerCAmelCase , lowerCAmelCase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'''A Matrix can only be multiplied by an int, float, or another matrix''' )
def __pow__( self , lowerCAmelCase ) -> Matrix:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise TypeError('''A Matrix can only be raised to the power of an int''' )
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''' )
SCREAMING_SNAKE_CASE__: str= self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase ) -> int:
return sum(row[i] * column[i] for i in range(len(lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
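# Stand-alone numeric check of the 2x2 determinant and adjugate-inverse formulas implemented by
# the Matrix class above, written against plain lists so it runs without the class. Values are
# illustrative.
rows = [[4.0, 7.0], [2.0, 6.0]]
det = rows[0][0] * rows[1][1] - rows[0][1] * rows[1][0]   # ad - bc
inverse = [
    [rows[1][1] / det, -rows[0][1] / det],
    [-rows[1][0] / det, rows[0][0] / det],
]
assert det == 10.0
assert inverse == [[0.6, -0.7], [-0.2, 0.4]]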
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=30 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : str=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : List[Any]=10 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Dict=None , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = TFViTModel(config=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
UpperCAmelCase_ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowercase__ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = TFViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , tf.keras.layers.Layer ) )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowercase__ ( self : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 )
| 82 | 0 |
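# Small arithmetic aside on the sequence length asserted by the ViT tests above: a ViT with patch
# size p over an image of side s yields (s // p) ** 2 patch tokens plus one [CLS] token. The
# numbers echo the tester defaults (image_size=30, patch_size=2) and are assumptions, not
# measurements.
image_size, patch_size = 30, 2
seq_length = (image_size // patch_size) ** 2 + 1
print(seq_length)  # 226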
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCamelCase )
class __lowercase ( __lowerCamelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
snake_case_ = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
snake_case_ = Features({"""text""": Value("""string""" )} )
snake_case_ = Features({"""labels""": ClassLabel} )
snake_case_ = "text"
snake_case_ = "labels"
def __lowercase ( self : Dict ,A : int ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] ,A ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
UpperCAmelCase__ : Dict = copy.deepcopy(self )
UpperCAmelCase__ : Optional[int] = self.label_schema.copy()
UpperCAmelCase__ : Dict = features[self.label_column]
UpperCAmelCase__ : Optional[int] = label_schema
return task_template
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 65 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
lowerCamelCase = {
"""facebook/bart-base""": 1_024,
"""facebook/bart-large""": 1_024,
"""facebook/bart-large-mnli""": 1_024,
"""facebook/bart-large-cnn""": 1_024,
"""facebook/bart-large-xsum""": 1_024,
"""yjernite/bart_eli5""": 1_024,
}
@lru_cache()
def bytes_to_unicode( ):
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]="replace" , _UpperCAmelCase : Any="<s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : Optional[int]="<unk>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : List[Any]="<mask>" , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : Dict , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
  # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ = json.load(_UpperCAmelCase )
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ = errors # how to handle errors in decoding
UpperCAmelCase_ = bytes_to_unicode()
UpperCAmelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding="utf-8" ) as merges_handle:
UpperCAmelCase_ = merges_handle.read().split("\n" )[1:-1]
UpperCAmelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
UpperCAmelCase_ = {}
UpperCAmelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ = tuple(_UpperCAmelCase )
UpperCAmelCase_ = get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
UpperCAmelCase_ = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ = bigram
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
while i < len(_UpperCAmelCase ):
try:
UpperCAmelCase_ = word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ = j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ = tuple(_UpperCAmelCase )
UpperCAmelCase_ = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
UpperCAmelCase_ = get_pairs(_UpperCAmelCase )
UpperCAmelCase_ = " ".join(_UpperCAmelCase )
UpperCAmelCase_ = word
return word
def lowercase__ ( self : Dict , _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = []
for token in re.findall(self.pat , _UpperCAmelCase ):
UpperCAmelCase_ = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(" " ) )
return bpe_tokens
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "".join(_UpperCAmelCase )
UpperCAmelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + "\n" )
UpperCAmelCase_ = 0
with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
     for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(" ".join(_UpperCAmelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowercase__ ( self : str , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
UpperCAmelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def lowercase__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()):
UpperCAmelCase_ = " " + text
return (text, kwargs)
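# Illustrative usage sketch, assuming the released `transformers` package is installed and
# the "facebook/bart-base" vocab/merges files can be downloaded; the class above mirrors
# this public byte-level BPE tokenizer:
from transformers import BartTokenizer
_bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
print(_bart_tokenizer("Hello world")["input_ids"])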
| 82 | 0 |
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[int]:
_lowercase : int = []
_lowercase : Optional[Any] = 2
_lowercase : Union[str, Any] = int(math.sqrt(SCREAMING_SNAKE_CASE ) ) # Size of every segment
_lowercase : Union[str, Any] = [True] * (end + 1)
_lowercase : List[str] = []
while start <= end:
if temp[start] is True:
in_prime.append(SCREAMING_SNAKE_CASE )
for i in range(start * start , end + 1 , SCREAMING_SNAKE_CASE ):
_lowercase : Tuple = False
start += 1
prime += in_prime
_lowercase : int = end + 1
_lowercase : List[str] = min(2 * end , SCREAMING_SNAKE_CASE )
while low <= n:
_lowercase : str = [True] * (high - low + 1)
for each in in_prime:
_lowercase : Dict = math.floor(low / each ) * each
if t < low:
t += each
for j in range(SCREAMING_SNAKE_CASE , high + 1 , SCREAMING_SNAKE_CASE ):
_lowercase : Optional[Any] = False
for j in range(len(SCREAMING_SNAKE_CASE ) ):
if temp[j] is True:
prime.append(j + low )
_lowercase : Tuple = high + 1
_lowercase : Any = min(high + end , SCREAMING_SNAKE_CASE )
return prime
print(sieve(10**6))
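# Illustrative cross-check (hypothetical example, not from the original sample) of the
# segmented sieve idea against plain trial division for a small bound; there are exactly
# 25 primes below 100. (If the function above is exposed as `sieve`, as the print call
# suggests, sieve(100) should return the same list.)
def _trial_division_primes(limit):
    return [p for p in range(2, limit) if all(p % d for d in range(2, int(p**0.5) + 1))]

assert len(_trial_division_primes(100)) == 25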
| 66 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCamelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCamelCase = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = 0.0
for i, j in zip(_UpperCAmelCase , _UpperCAmelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(_UpperCAmelCase , _UpperCAmelCase ) else 0.0
UpperCAmelCase_ = n_correct / len(_UpperCAmelCase )
return {
"accuracy": accuracy,
}
| 82 | 0 |
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Sequence[int] | None = None ) -> int:
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
_lowercase = nums[0]
for i in range(1 , len(snake_case__ ) ):
_lowercase = nums[i]
_lowercase = max(snake_case__ , ans + num , snake_case__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
snake_case = int(input("""Enter number of elements : """).strip())
snake_case = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
 | 67 |
"""simple docstring"""
lowerCamelCase = """Alexander Joslin"""
import operator as op
from .stack import Stack
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
UpperCAmelCase_ = Stack()
UpperCAmelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowerCAmelCase__ ) )
elif i in operators:
# RULE 2
operator_stack.push(lowerCAmelCase__ )
elif i == ")":
# RULE 4
UpperCAmelCase_ = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operators[opr](lowerCAmelCase__ , lowerCAmelCase__ )
operand_stack.push(lowerCAmelCase__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
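# Note (added for illustration): the parser assumes a fully parenthesized expression with
# single-digit operands, since every character passing isdigit() is pushed as its own
# operand. For such inputs, Python's own evaluator gives a handy reference value:
assert eval("(5 + ((4 * 2) * (2 + 3)))") == 45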
| 82 | 0 |
from itertools import permutations
def lowercase__ ( A_: tuple ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__UpperCAmelCase =[7, 11, 13, 17]
for i, test in enumerate(A_ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase__ ( A_: int = 10 ) -> int:
"""simple docstring"""
return sum(
int("""""".join(map(A_ , A_ ) ) )
for num in permutations(range(A_ ) )
if is_substring_divisible(A_ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
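# Illustrative check (example added for clarity): 1406357289 is a known member of the
# pandigital family summed above, so each of its three-digit windows must be divisible by
# 2, 3, 5, 7, 11, 13 and 17 in turn:
_s = "1406357289"
assert all(int(_s[i + 1 : i + 4]) % d == 0 for i, d in enumerate((2, 3, 5, 7, 11, 13, 17)))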
| 68 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = int(number**0.5 )
return number == sq * sq
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCAmelCase_ = x_den * y_den * z_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
top //= hcf
bottom //= hcf
return top, bottom
def a__ ( lowerCAmelCase__ = 35 ):
UpperCAmelCase_ = set()
UpperCAmelCase_ = 42
UpperCAmelCase_ = Fraction(0 )
UpperCAmelCase_ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCAmelCase_ = x_num * y_den + x_den * y_num
UpperCAmelCase_ = x_den * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCAmelCase_ = x_den * x_den * y_den * y_den
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=-1
UpperCAmelCase_ = x_num * y_num
UpperCAmelCase_ = x_den * y_num + x_num * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = x_num * x_num * y_num * y_num
UpperCAmelCase_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
for num, den in unique_s:
total += Fraction(lowerCAmelCase__ , lowerCAmelCase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
        raise ValueError("The given input must be non-negative" )
# get the generated string sequence
__snake_case = gray_code_sequence_string(_UpperCAmelCase )
#
# convert them to integers
for i in range(len(_UpperCAmelCase ) ):
__snake_case = int(sequence[i] , 2 )
return sequence
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__snake_case = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__snake_case = gray_code_sequence_string(bit_count - 1 )
__snake_case = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__snake_case = "0" + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__snake_case = "1" + smaller_sequence[i]
sequence.append(_UpperCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
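# Illustrative check (example added for clarity): the reflect-and-prefix construction above
# agrees with the closed-form Gray code g(i) = i ^ (i >> 1); for 2 bits both give [0, 1, 3, 2].
assert [i ^ (i >> 1) for i in range(4)] == [0, 1, 3, 2]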
| 69 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
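# Worked instance (values chosen for illustration) of f = 1 / (2 * pi * sqrt(L * C)):
# with L = 10 mH and C = 10 uF the resonant frequency comes out to roughly 503 Hz.
from math import isclose, pi, sqrt
assert isclose(1 / (2 * pi * sqrt(10e-3 * 10e-6)), 503.3, rel_tol=1e-3)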
| 82 | 0 |
lowerCamelCase : Optional[Any] = 8.314_4598
def _SCREAMING_SNAKE_CASE ( lowercase : float , lowercase : float ):
'''simple docstring'''
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K' )
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
lowerCamelCase : Tuple = 300
lowerCamelCase : str = 28
lowerCamelCase : List[Any] = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
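# Note (added for illustration): v_rms = sqrt(3RT/M) expects the molar mass in kg/mol, so
# nitrogen is about 0.028 kg/mol, which gives roughly 517 m/s at 300 K:
import math
assert math.isclose((3 * 8.3144598 * 300 / 0.028) ** 0.5, 517, rel_tol=0.01)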
| 70 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''vit'''
def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : int=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=16 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = encoder_stride
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 1e-4
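# Illustrative usage sketch, assuming the released `transformers` package with PyTorch is
# installed; the config can be instantiated and turned into a randomly initialised model
# without any download:
from transformers import ViTConfig, ViTModel
_vit_config = ViTConfig(image_size=224, patch_size=16, num_channels=3)
_vit_model = ViTModel(_vit_config)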
| 82 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : jnp.ndarray
__A : jnp.ndarray
class _snake_case (nn.Module):
__A : int
__A : Tuple[int] =(16, 32, 96, 2_56)
__A : jnp.dtype =jnp.floataa
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
UpperCAmelCase_ : Any = []
for i in range(len(self.block_out_channels ) - 1 ):
UpperCAmelCase_ : str = self.block_out_channels[i]
UpperCAmelCase_ : Tuple = self.block_out_channels[i + 1]
UpperCAmelCase_ : Any = nn.Conv(
_snake_case ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(_snake_case )
UpperCAmelCase_ : Optional[int] = nn.Conv(
_snake_case ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(_snake_case )
UpperCAmelCase_ : Tuple = blocks
UpperCAmelCase_ : str = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self ,_snake_case ):
UpperCAmelCase_ : Dict = self.conv_in(_snake_case )
UpperCAmelCase_ : int = nn.silu(_snake_case )
for block in self.blocks:
UpperCAmelCase_ : Union[str, Any] = block(_snake_case )
UpperCAmelCase_ : Tuple = nn.silu(_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.conv_out(_snake_case )
return embedding
@flax_register_to_config
class _snake_case (nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__A : int =32
__A : int =4
__A : Tuple[str] =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__A : Union[bool, Tuple[bool]] =False
__A : Tuple[int] =(3_20, 6_40, 12_80, 12_80)
__A : int =2
__A : Union[int, Tuple[int]] =8
__A : Optional[Union[int, Tuple[int]]] =None
__A : int =12_80
__A : float =0.0
__A : bool =False
__A : jnp.dtype =jnp.floataa
__A : bool =True
__A : int =0
__A : str ="rgb"
__A : Tuple[int] =(16, 32, 96, 2_56)
def UpperCamelCase__ ( self ,_snake_case ):
# init input tensors
UpperCAmelCase_ : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCAmelCase_ : Any = jnp.zeros(_snake_case ,dtype=jnp.floataa )
UpperCAmelCase_ : Tuple = jnp.ones((1,) ,dtype=jnp.intaa )
UpperCAmelCase_ : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
UpperCAmelCase_ : Union[str, Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
UpperCAmelCase_ : int = jnp.zeros(_snake_case ,dtype=jnp.floataa )
UpperCAmelCase_ , UpperCAmelCase_ : str = jax.random.split(_snake_case )
UpperCAmelCase_ : Optional[int] = {"params": params_rng, "dropout": dropout_rng}
return self.init(_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case )["params"]
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.block_out_channels
UpperCAmelCase_ : Union[str, Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCAmelCase_ : Dict = self.num_attention_heads or self.attention_head_dim
# input
UpperCAmelCase_ : str = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
UpperCAmelCase_ : Dict = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
UpperCAmelCase_ : List[Any] = FlaxTimestepEmbedding(_snake_case ,dtype=self.dtype )
UpperCAmelCase_ : int = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
UpperCAmelCase_ : Any = self.only_cross_attention
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : Tuple = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : str = []
UpperCAmelCase_ : List[str] = block_out_channels[0]
UpperCAmelCase_ : Union[str, Any] = nn.Conv(
_snake_case ,kernel_size=(1, 1) ,padding="VALID" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(_snake_case )
for i, down_block_type in enumerate(self.down_block_types ):
UpperCAmelCase_ : Tuple = output_channel
UpperCAmelCase_ : List[Any] = block_out_channels[i]
UpperCAmelCase_ : int = i == len(_snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCAmelCase_ : int = FlaxCrossAttnDownBlockaD(
in_channels=_snake_case ,out_channels=_snake_case ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
UpperCAmelCase_ : List[Any] = FlaxDownBlockaD(
in_channels=_snake_case ,out_channels=_snake_case ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(_snake_case )
for _ in range(self.layers_per_block ):
UpperCAmelCase_ : List[Any] = nn.Conv(
_snake_case ,kernel_size=(1, 1) ,padding="VALID" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(_snake_case )
if not is_final_block:
UpperCAmelCase_ : int = nn.Conv(
_snake_case ,kernel_size=(1, 1) ,padding="VALID" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(_snake_case )
UpperCAmelCase_ : int = down_blocks
UpperCAmelCase_ : Union[str, Any] = controlnet_down_blocks
# mid
UpperCAmelCase_ : int = block_out_channels[-1]
UpperCAmelCase_ : Tuple = FlaxUNetMidBlockaDCrossAttn(
in_channels=_snake_case ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
UpperCAmelCase_ : List[str] = nn.Conv(
_snake_case ,kernel_size=(1, 1) ,padding="VALID" ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = 1.0 ,_snake_case = True ,_snake_case = False ,):
UpperCAmelCase_ : int = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
UpperCAmelCase_ : Union[str, Any] = jnp.flip(_snake_case ,axis=1 )
# 1. time
if not isinstance(_snake_case ,jnp.ndarray ):
UpperCAmelCase_ : Optional[int] = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(_snake_case ,jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ : str = timesteps.astype(dtype=jnp.floataa )
UpperCAmelCase_ : Optional[int] = jnp.expand_dims(_snake_case ,0 )
UpperCAmelCase_ : str = self.time_proj(_snake_case )
UpperCAmelCase_ : Optional[Any] = self.time_embedding(_snake_case )
# 2. pre-process
UpperCAmelCase_ : Union[str, Any] = jnp.transpose(_snake_case ,(0, 2, 3, 1) )
UpperCAmelCase_ : List[str] = self.conv_in(_snake_case )
UpperCAmelCase_ : Tuple = jnp.transpose(_snake_case ,(0, 2, 3, 1) )
UpperCAmelCase_ : Optional[int] = self.controlnet_cond_embedding(_snake_case )
sample += controlnet_cond
# 3. down
UpperCAmelCase_ : Union[str, Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = down_block(_snake_case ,_snake_case ,_snake_case ,deterministic=not train )
else:
UpperCAmelCase_ , UpperCAmelCase_ : str = down_block(_snake_case ,_snake_case ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
UpperCAmelCase_ : str = self.mid_block(_snake_case ,_snake_case ,_snake_case ,deterministic=not train )
        # 5. controlnet blocks
UpperCAmelCase_ : int = ()
for down_block_res_sample, controlnet_block in zip(_snake_case ,self.controlnet_down_blocks ):
UpperCAmelCase_ : List[str] = controlnet_block(_snake_case )
controlnet_down_block_res_samples += (down_block_res_sample,)
UpperCAmelCase_ : str = controlnet_down_block_res_samples
UpperCAmelCase_ : List[Any] = self.controlnet_mid_block(_snake_case )
# 6. scaling
UpperCAmelCase_ : Any = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_snake_case ,mid_block_res_sample=_snake_case )
| 71 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : int=30 , _UpperCAmelCase : Tuple=400 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = size if size is not None else {"height": 20, "width": 20}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_convert_rgb
UpperCAmelCase_ = [512, 1024, 2048, 4096]
UpperCAmelCase_ = patch_size if patch_size is not None else {"height": 16, "width": 16}
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase_ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self )
@property
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ = 2048
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase_ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCAmelCase ):
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
UpperCAmelCase_ = "Hello"
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase_ = 3
@property
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 82 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCamelCase ( lowercase_ : str , lowercase_ : str , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : int , lowercase_ : Optional[int] = None , ) -> int:
'''simple docstring'''
lowercase ={}
if train_file is not None:
lowercase =[train_file]
if eval_file is not None:
lowercase =[eval_file]
if test_file is not None:
lowercase =[test_file]
lowercase =datasets.load_dataset('''csv''' , data_files=lowercase_ )
lowercase =list(ds[list(files.keys() )[0]].features.keys() )
lowercase =features_name.pop(lowercase_ )
lowercase =list(set(ds[list(files.keys() )[0]][label_name] ) )
lowercase ={label: i for i, label in enumerate(lowercase_ )}
lowercase =tokenizer.model_input_names
lowercase ={}
if len(lowercase_ ) == 1:
for k in files.keys():
lowercase =ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase_ , max_length=lowercase_ , padding='''max_length''' ) , batched=lowercase_ , )
elif len(lowercase_ ) == 2:
for k in files.keys():
lowercase =ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase_ , max_length=lowercase_ , padding='''max_length''' , ) , batched=lowercase_ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
lowercase ={k: v for k, v in ex.items() if k in input_names}
lowercase =labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
lowercase ={k: v for k, v in ex.items() if k in input_names}
lowercase =labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
lowercase ={k: v for k, v in ex.items() if k in input_names}
lowercase =labelaid[ex[label_name]]
yield (d, label)
lowercase =(
tf.data.Dataset.from_generator(
lowercase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
lowercase =train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
lowercase =(
tf.data.Dataset.from_generator(
lowercase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
lowercase =val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
lowercase =(
tf.data.Dataset.from_generator(
lowercase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
lowercase =test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_UpperCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
UpperCamelCase__ = field(metadata={'help': 'Which column contains the label'} )
UpperCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The path of the training file'} )
UpperCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The path of the development file'} )
UpperCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The path of the test file'} )
UpperCamelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class __magic_name__ :
UpperCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
UpperCamelCase__ = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCamelCase__ = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def UpperCamelCase ( ) -> Optional[int]:
'''simple docstring'''
lowercase =HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
lowercase , lowercase , lowercase =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
f'16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase , lowercase , lowercase , lowercase =get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase_ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
lowercase =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
lowercase =TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , )
    def compute_metrics(p : EvalPrediction ) -> Dict:
lowercase =np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
lowercase =TFTrainer(
model=lowercase_ , args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , compute_metrics=lowercase_ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase =trainer.evaluate()
lowercase =os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(lowercase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
results.update(lowercase_ )
return results
if __name__ == "__main__":
main()
| 72 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCAmelCase_ = BitConfig(
conv_layer=lowerCAmelCase__ , num_labels=1000 , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , )
return config
def a__ ( lowerCAmelCase__ ):
if "stem.conv" in name:
UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
UpperCAmelCase_ = "bit." + name
if "bit" not in name and "classifier" not in name:
UpperCAmelCase_ = "bit.encoder." + name
return name
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ = get_config(lowerCAmelCase__ )
# load original model from timm
UpperCAmelCase_ = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
# load state_dict of original model
UpperCAmelCase_ = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val.squeeze() if "head" in key else val
# load HuggingFace model
UpperCAmelCase_ = BitForImageClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(lowerCAmelCase__ ).unsqueeze(0 )
UpperCAmelCase_ = processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(lowerCAmelCase__ )
UpperCAmelCase_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCAmelCase_ = timm_model(lowerCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowerCamelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Union[str, Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 73 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    UpperCAmelCase_ = sorted(zip(lowerCAmelCase__ , lowerCAmelCase__ ) , key=lambda x : x[0] / x[1] , reverse=lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = [i[0] for i in r], [i[1] for i in r]
UpperCAmelCase_ = list(accumulate(lowerCAmelCase__ ) )
UpperCAmelCase_ = bisect(lowerCAmelCase__ , lowerCAmelCase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
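# Illustrative instance (values chosen for illustration): values [60, 100, 120],
# weights [10, 20, 30] and capacity 50 have optimum 240 (items one and two taken whole
# plus two thirds of item three). A tiny greedy reference computation:
_items = sorted(zip([60, 100, 120], [10, 20, 30]), key=lambda vw: vw[0] / vw[1], reverse=True)
_capacity, _total = 50, 0.0
for _value, _weight in _items:
    _take = min(_weight, _capacity)
    _total += _value * _take / _weight
    _capacity -= _take
assert _total == 240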
| 82 | 0 |
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = len(snake_case ) + 1
__SCREAMING_SNAKE_CASE : Tuple = len(snake_case ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
__SCREAMING_SNAKE_CASE : Dict = [[0 for i in range(snake_case )] for j in range(snake_case )]
# since string of zero length match pattern of zero length
__SCREAMING_SNAKE_CASE : Tuple = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , snake_case ):
__SCREAMING_SNAKE_CASE : Optional[int] = 0
    # since a string of zero length still matches any pattern prefix made up
    # entirely of "<char>*" pairs
for j in range(1 , snake_case ):
__SCREAMING_SNAKE_CASE : Optional[int] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , snake_case ):
for j in range(1 , snake_case ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
__SCREAMING_SNAKE_CASE : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
__SCREAMING_SNAKE_CASE : List[Any] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
__SCREAMING_SNAKE_CASE : int = dp[i - 1][j]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
else:
__SCREAMING_SNAKE_CASE : int = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
lowercase_ = """aab"""
lowercase_ = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 74 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCamelCase = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
lowerCamelCase = None
def a__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=lowerCAmelCase__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=lowerCAmelCase__ , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = bool(qa["answers"]["text"] )
return qid_to_has_ans
def a__ ( lowerCAmelCase__ ):
def remove_articles(lowerCAmelCase__ ):
return ARTICLES_REGEX.sub(" " , lowerCAmelCase__ )
def white_space_fix(lowerCAmelCase__ ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase__ ):
UpperCAmelCase_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase__ ) ) ) )
def a__ ( lowerCAmelCase__ ):
if not s:
return []
return normalize_answer(lowerCAmelCase__ ).split()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return int(normalize_answer(lowerCAmelCase__ ) == normalize_answer(lowerCAmelCase__ ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = collections.Counter(lowerCAmelCase__ ) & collections.Counter(lowerCAmelCase__ )
UpperCAmelCase_ = sum(common.values() )
if len(lowerCAmelCase__ ) == 0 or len(lowerCAmelCase__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = (2 * precision * recall) / (precision + recall)
return fa
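# Worked example for the token-level F1 above (illustrative; note that the normalizer
# strips articles such as "a/an/the" before tokenization):
#   gold = "blue cat sat", prediction = "blue cat"
#   shared tokens = 2, precision = 2/2 = 1.0, recall = 2/3
#   F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8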
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = qa["id"]
UpperCAmelCase_ = [t for t in qa["answers"]["text"] if normalize_answer(lowerCAmelCase__ )]
if not gold_answers:
# For unanswerable questions, the only correct answer is the empty string
UpperCAmelCase_ = [""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
UpperCAmelCase_ = preds[qid]
# Take max over all gold answers
UpperCAmelCase_ = max(compute_exact(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
UpperCAmelCase_ = max(compute_fa(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
return exact_scores, fa_scores
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = {}
for qid, s in scores.items():
UpperCAmelCase_ = na_probs[qid] > na_prob_thresh
if pred_na:
UpperCAmelCase_ = float(not qid_to_has_ans[qid] )
else:
UpperCAmelCase_ = s
return new_scores
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
if not qid_list:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for k in new_eval:
UpperCAmelCase_ = new_eval[k]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
plt.step(lowerCAmelCase__ , lowerCAmelCase__ , color="b" , alpha=0.2 , where="post" )
plt.fill_between(lowerCAmelCase__ , lowerCAmelCase__ , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowerCAmelCase__ )
plt.savefig(lowerCAmelCase__ )
plt.clf()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ):
UpperCAmelCase_ = sorted(lowerCAmelCase__ , key=lambda k : na_probs[k] )
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = 1.0
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = [1.0]
UpperCAmelCase_ = [0.0]
UpperCAmelCase_ = 0.0
for i, qid in enumerate(lowerCAmelCase__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
UpperCAmelCase_ = true_pos / float(i + 1 )
UpperCAmelCase_ = true_pos / float(lowerCAmelCase__ )
if i == len(lowerCAmelCase__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCAmelCase__ )
recalls.append(lowerCAmelCase__ )
if out_image:
plot_pr_curve(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return {"ap": 100.0 * avg_prec}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if out_image_dir and not os.path.exists(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
UpperCAmelCase_ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
UpperCAmelCase_ = {k: float(lowerCAmelCase__ ) for k, v in qid_to_has_ans.items()}
UpperCAmelCase_ = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_exact" )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_f1" )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_oracle" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if not qid_list:
return
UpperCAmelCase_ = [na_probs[k] for k in qid_list]
UpperCAmelCase_ = np.ones_like(lowerCAmelCase__ ) / float(len(lowerCAmelCase__ ) )
plt.hist(lowerCAmelCase__ , weights=lowerCAmelCase__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(lowerCAmelCase__ , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
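# The helper below sweeps candidate no-answer thresholds in order of increasing
# no-answer probability: questions are visited one by one, a running score is
# updated for each, and the best running score together with the probability at
# which it occurred is returned (as a percentage plus the threshold value).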
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCAmelCase_ = num_no_ans
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = sorted(lowerCAmelCase__ , key=lambda k : na_probs[k] )
for i, qid in enumerate(lowerCAmelCase__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCAmelCase_ = scores[qid]
else:
if preds[qid]:
UpperCAmelCase_ = -1
else:
UpperCAmelCase_ = 0
cur_score += diff
if cur_score > best_score:
UpperCAmelCase_ = cur_score
UpperCAmelCase_ = na_probs[qid]
return 100.0 * best_score / len(lowerCAmelCase__ ), best_thresh
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = find_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = best_exact
UpperCAmelCase_ = exact_thresh
UpperCAmelCase_ = best_fa
UpperCAmelCase_ = fa_thresh
def a__ ( ):
with open(OPTS.data_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
UpperCAmelCase_ = dataset_json["data"]
with open(OPTS.pred_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCAmelCase_ = json.load(lowerCAmelCase__ )
else:
UpperCAmelCase_ = {k: 0.0 for k in preds}
UpperCAmelCase_ = make_qid_to_has_ans(lowerCAmelCase__ ) # maps qid to True/False
UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if v]
UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if not v]
UpperCAmelCase_ , UpperCAmelCase_ = get_raw_scores(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
UpperCAmelCase_ = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ )
if has_ans_qids:
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "HasAns" )
if no_ans_qids:
UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir )
histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
else:
print(json.dumps(lowerCAmelCase__ , indent=2 ) )
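# Evaluation flow implemented by the driver function above (descriptive summary):
# raw exact-match and F1 scores are computed per question, a no-answer threshold is
# applied when a na_prob_file is provided, aggregate metrics are built overall and for
# the HasAns / NoAns subsets, and optional precision-recall curves and no-answer
# histograms are written to out_image_dir before the final metrics are dumped or printed.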
if __name__ == "__main__":
lowerCamelCase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 82 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self : Any , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : bool = True , **_A : List[Any] , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : Optional[int] = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase__ : Union[str, Any] = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : Optional[Any] = get_size_dict(_A , default_to_square=_A , param_name='''crop_size''' )
UpperCAmelCase__ : int = do_resize
UpperCAmelCase__ : Tuple = size
UpperCAmelCase__ : Union[str, Any] = resample
UpperCAmelCase__ : int = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Union[str, Any] = do_rescale
UpperCAmelCase__ : Optional[Any] = rescale_factor
UpperCAmelCase__ : List[str] = do_normalize
UpperCAmelCase__ : List[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase__ : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase__ : Dict = do_convert_rgb
def lowercase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : Any = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase__ : List[Any] = get_resize_output_image_size(_A , size=size['''shortest_edge'''] , default_to_square=_A )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def lowercase_ ( self : Any , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def lowercase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def lowercase_ ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : int = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : bool = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , **_A : int , ):
'''simple docstring'''
UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Optional[int] = size if size is not None else self.size
UpperCAmelCase__ : List[Any] = get_size_dict(_A , param_name='''size''' , default_to_square=_A )
UpperCAmelCase__ : str = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Dict = get_size_dict(_A , param_name='''crop_size''' , default_to_square=_A )
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Tuple = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase__ : int = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase__ : str = [convert_to_rgb(_A ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase__ : Dict = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase__ : Optional[Any] = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
UpperCAmelCase__ : List[Any] = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
UpperCAmelCase__ : Optional[int] = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
UpperCAmelCase__ : Union[str, Any] = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
UpperCAmelCase__ : List[Any] = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase__ : Tuple = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
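# Summary of the preprocessing pipeline implemented above (descriptive, no new behaviour):
#   convert_to_rgb -> to_numpy_array -> resize(shortest_edge) -> center_crop
#   -> rescale(1/255) -> normalize(OPENAI_CLIP_MEAN / OPENAI_CLIP_STD)
#   -> to_channel_dimension_format -> BatchFeature({"pixel_values": images})
# The resize / crop / rescale / normalize steps run only when their do_* flags resolve to True.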
| 75 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float(moles / volume ) * nfactor )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
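# Worked example of the ideal-gas relation PV = nRT used by the helpers above (illustrative):
#   R = 0.0821 L*atm/(mol*K)
#   n = 1 mol, T = 300 K, V = 24.63 L  ->  P = (1 * 0.0821 * 300) / 24.63 = 1 atm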
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCAmelCase_ ( snake_case ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> List[Any]:
super().__init__(
UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
__lowercase : List[Any] = field
__lowercase : List[str] = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths}
__lowercase : Dict = Json(
cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , field=UpperCamelCase_ , **UpperCamelCase_ , )
def _lowerCamelCase ( self ) -> str:
# Build iterable dataset
if self.streaming:
__lowercase : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowercase : List[Any] = None
__lowercase : Any = None
__lowercase : Union[str, Any] = None
__lowercase : Optional[int] = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
__lowercase : str = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset
class UpperCAmelCase_ :
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> str:
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
__lowercase : Any = dataset
__lowercase : Dict = path_or_buf
__lowercase : Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__lowercase : List[str] = num_proc
__lowercase : Optional[Any] = '''utf-8'''
__lowercase : Tuple = to_json_kwargs
def _lowerCamelCase ( self ) -> int:
__lowercase : str = self.to_json_kwargs.pop('''path_or_buf''' , UpperCamelCase_ )
__lowercase : str = self.to_json_kwargs.pop('''orient''' , '''records''' )
__lowercase : List[Any] = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
__lowercase : Any = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
__lowercase : Tuple = self.to_json_kwargs.pop('''compression''' , UpperCamelCase_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=UpperCamelCase_ ) as buffer:
__lowercase : List[str] = self._write(file_obj=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
''' was passed. Please provide a local path instead.''' )
__lowercase : Tuple = self._write(
file_obj=self.path_or_buf , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs )
return written
def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]:
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase : Dict = args
__lowercase : Union[str, Any] = query_table(
table=self.dataset.data , key=slice(UpperCamelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__lowercase : Optional[Any] = batch.to_pandas().to_json(
path_or_buf=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **UpperCamelCase_ )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ , ) -> int:
__lowercase : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
__lowercase : str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(UpperCamelCase_ )
else:
__lowercase ,__lowercase : Dict = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , UpperCamelCase_ , UpperCamelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(UpperCamelCase_ )
return written
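# Writing flow implemented above (descriptive): rows are pulled from the Arrow table in
# slices of batch_size, each slice is converted to pandas and serialized with
# to_json(orient=..., lines=...), and the encoded chunks are written to the target
# either sequentially or via a multiprocessing.Pool when num_proc > 1.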
| 76 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
lowerCamelCase = 6_378_137.0
lowerCamelCase = 6_356_752.314_245
lowerCamelCase = 6_378_137
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
# Compute central angle between two points
# using the haversine formula; sigma = haversine_distance / equatorial radius
UpperCAmelCase_ = haversine_distance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
UpperCAmelCase_ = (b_lata + b_lata) / 2
UpperCAmelCase_ = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
UpperCAmelCase_ = (sin(lowerCAmelCase__ ) ** 2) * (cos(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = cos(sigma / 2 ) ** 2
UpperCAmelCase_ = (sigma - sin(lowerCAmelCase__ )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
UpperCAmelCase_ = (cos(lowerCAmelCase__ ) ** 2) * (sin(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = sin(sigma / 2 ) ** 2
UpperCAmelCase_ = (sigma + sin(lowerCAmelCase__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
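# Sanity check for the correction above (illustrative): for two points on the equator
# both parametric latitudes are zero, so P = Q = 0 and the X / Y terms vanish; the
# function then returns EQUATORIAL_RADIUS * sigma, i.e. the plain haversine distance.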
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A = logging.get_logger(__name__)
A = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
A = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
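# The mapping above rewrites fairseq parameter names to their HuggingFace
# Wav2Vec2-Conformer counterparts; "*" is a placeholder that is later replaced with the
# encoder layer index, and the list above names the heads that live outside the
# "wav2vec2_conformer." backbone prefix and are therefore copied without that prefix.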
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split("." ):
__UpperCAmelCase : List[str] = getattr(UpperCamelCase , UpperCamelCase )
if weight_type is not None:
__UpperCAmelCase : List[str] = getattr(UpperCamelCase , UpperCamelCase ).shape
else:
__UpperCAmelCase : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__UpperCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__UpperCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__UpperCAmelCase : Optional[int] = value
elif weight_type == "bias":
__UpperCAmelCase : int = value
elif weight_type == "running_mean":
__UpperCAmelCase : int = value
elif weight_type == "running_var":
__UpperCAmelCase : Optional[int] = value
elif weight_type == "num_batches_tracked":
__UpperCAmelCase : Any = value
elif weight_type == "inv_freq":
__UpperCAmelCase : List[str] = value
else:
__UpperCAmelCase : Union[str, Any] = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Optional[int] = fairseq_model.state_dict()
__UpperCAmelCase : str = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__UpperCAmelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
__UpperCAmelCase : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
__UpperCAmelCase : List[Any] = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__UpperCAmelCase : str = True
if "*" in mapped_key:
__UpperCAmelCase : Tuple = name.split(UpperCamelCase )[0].split("." )[-2]
__UpperCAmelCase : Any = mapped_key.replace("*" , UpperCamelCase )
if "pos_bias_u" in name:
__UpperCAmelCase : Dict = None
elif "pos_bias_v" in name:
__UpperCAmelCase : Optional[Any] = None
elif "weight_g" in name:
__UpperCAmelCase : Union[str, Any] = "weight_g"
elif "weight_v" in name:
__UpperCAmelCase : Dict = "weight_v"
elif "bias" in name:
__UpperCAmelCase : Tuple = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCAmelCase : Tuple = "weight"
elif "running_mean" in name:
__UpperCAmelCase : int = "running_mean"
elif "inv_freq" in name:
__UpperCAmelCase : Tuple = "inv_freq"
elif "running_var" in name:
__UpperCAmelCase : Union[str, Any] = "running_var"
elif "num_batches_tracked" in name:
__UpperCAmelCase : Optional[int] = "num_batches_tracked"
else:
__UpperCAmelCase : List[str] = None
set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
continue
if not is_used:
unused_weights.append(UpperCamelCase )
logger.warning(f"Unused weights: {unused_weights}" )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = full_name.split("conv_layers." )[-1]
__UpperCAmelCase : List[Any] = name.split("." )
__UpperCAmelCase : List[Any] = int(items[0] )
__UpperCAmelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__UpperCAmelCase : Union[str, Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__UpperCAmelCase : Dict = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__UpperCAmelCase : int = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__UpperCAmelCase : Any = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(UpperCamelCase )
@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True ) -> Optional[int]:
"""simple docstring"""
if config_path is not None:
__UpperCAmelCase : int = WavaVecaConformerConfig.from_pretrained(UpperCamelCase , hidden_act="swish" )
else:
__UpperCAmelCase : List[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__UpperCAmelCase : str = "rotary"
if is_finetuned:
if dict_path:
__UpperCAmelCase : int = Dictionary.load(UpperCamelCase )
# important: change the bos & pad token ids since the CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCAmelCase : Optional[Any] = target_dict.pad_index
__UpperCAmelCase : Dict = target_dict.bos_index
__UpperCAmelCase : Dict = target_dict.eos_index
__UpperCAmelCase : List[Any] = len(target_dict.symbols )
__UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase , "vocab.json" )
if not os.path.isdir(UpperCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCamelCase ) )
return
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
__UpperCAmelCase : Dict = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Tuple = 1
with open(UpperCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Any = WavaVecaCTCTokenizer(
UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCamelCase , )
__UpperCAmelCase : List[str] = True if config.feat_extract_norm == "layer" else False
__UpperCAmelCase : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCamelCase , return_attention_mask=UpperCamelCase , )
__UpperCAmelCase : List[Any] = WavaVecaProcessor(feature_extractor=UpperCamelCase , tokenizer=UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
__UpperCAmelCase : Any = WavaVecaConformerForCTC(UpperCamelCase )
else:
__UpperCAmelCase : int = WavaVecaConformerForPreTraining(UpperCamelCase )
if is_finetuned:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
__UpperCAmelCase : int = argparse.Namespace(task="audio_pretraining" )
__UpperCAmelCase : List[str] = fairseq.tasks.setup_task(UpperCamelCase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCamelCase )
__UpperCAmelCase : List[str] = model[0].eval()
recursively_load_weights(UpperCamelCase , UpperCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
A = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
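# Example invocation (the script name and all paths below are placeholders, not real files;
# the argument names match the parser defined above):
#   python convert_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --config_path /path/to/config.json \
#       --dict_path /path/to/dict.ltr.txt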
| 77 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Dict=36 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[Any]=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = 300
return config
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = self.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase__ ( self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = MraModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> int:
'''simple docstring'''
UpperCAmelCase_ = True
UpperCAmelCase_ = MraModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = MraForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = ()
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MraModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MraModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip(reason="MRA does not output attentions" )
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
UpperCAmelCase_ = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 82 | 0 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_: Tuple =False, False, False
@dataclass
class __A :
a__ : Optional[int] = None
a__ : bool = True
a__ : bool = True
a__ : Optional[str] = None
# Automatically constructed
a__ : ClassVar[str] = "dict"
a__ : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
a__ : str = field(default="""Audio""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__(self : Optional[Any] ):
return self.pa_type
def _lowercase (self : str , __a : Union[str, bytes, dict] ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(__a , __a ):
return {"bytes": None, "path": value}
elif isinstance(__a , __a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(__a , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# To convert raw PCM bytes to WAV bytes, the sampling rate must be known
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If the PCM bytes are already present, there is no need to read the file again (just use them)
UpperCAmelCase_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
UpperCAmelCase_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767
UpperCAmelCase_ = BytesIO(bytes() )
sf.write(__a , __a , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _lowercase (self : Dict , __a : dict , __a : Optional[Dict[str, Union[str, bool, None]]] = None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase_ , UpperCAmelCase_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase_ = xsplitext(__a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split("::" )[-1]
try:
UpperCAmelCase_ = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(__a , "rb" , use_auth_token=__a ) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(__a )
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(__a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(__a , orig_sr=__a , target_sr=self.sampling_rate )
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowercase (self : Dict ):
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def _lowercase (self : Optional[Any] , __a : Union[pa.StringArray, pa.StructArray] ):
if pa.types.is_string(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase_ = pa.array([Audio().encode_example(__a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase_ = storage.field("bytes" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase_ = storage.field("path" )
else:
UpperCAmelCase_ = pa.array([None] * len(__a ) , type=pa.string() )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(__a , self.pa_type )
def _lowercase (self : Dict , __a : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(__a : Tuple ):
with xopen(__a , "rb" ) as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
| 78 |
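# Hedged illustration (not part of the dataset rows above/below): the Audio decode path
# above reads samples with soundfile and then optionally downmixes and resamples with
# librosa. A minimal, self-contained sketch of that resampling step, assuming librosa
# and numpy are installed; the function name here is illustrative only.
import numpy as np
import librosa

def resample_audio(array: np.ndarray, orig_sr: int, target_sr: int) -> np.ndarray:
    array = librosa.to_mono(array)  # collapse (channels, samples) to mono, as in the decode path
    if orig_sr != target_sr:
        array = librosa.resample(array, orig_sr=orig_sr, target_sr=target_sr)
    return array

# one second of a 440 Hz tone at 48 kHz downsampled to 16 kHz
tone = np.sin(2 * np.pi * 440 * np.arange(48_000) / 48_000).astype(np.float32)
print(resample_audio(tone, 48_000, 16_000).shape)  # (16000,)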
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase = 50_000
lowerCamelCase = 5_000
lowerCamelCase , lowerCamelCase = os.path.split(__file__)
lowerCamelCase = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
def a__ ( ):
UpperCAmelCase_ = {"num examples": SPEED_TEST_N_EXAMPLES}
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
UpperCAmelCase_ = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
UpperCAmelCase_ = generate_example_dataset(
os.path.join(lowerCAmelCase__ , "dataset.arrow" ) , lowerCAmelCase__ , num_examples=lowerCAmelCase__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(lowerCAmelCase__ , **lowerCAmelCase__ )
print("shuffling dataset" )
UpperCAmelCase_ = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(
lowerCAmelCase__ , **lowerCAmelCase__ )
with open(lowerCAmelCase__ , "wb" ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 82 | 0 |
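# Hedged sketch: `get_duration` in the benchmark above comes from a local `utils` module
# that is not shown in this row. A decorator with the behaviour the benchmark relies on
# (return the wall-clock time of the wrapped call so it can be stored in the results dict)
# could look like this; it is an assumption about that helper, not its actual source.
import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # seconds elapsed
    return wrapper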
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=True , __lowerCamelCase="pt" ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {"""add_prefix_space""": True} if isinstance(__lowerCamelCase , __lowerCamelCase ) and not line.startswith(""" """ ) else {}
UpperCAmelCase__ : Dict = padding_side
return tokenizer(
[line] , max_length=__lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase , add_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : str = input_ids.ne(__lowerCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class UpperCAmelCase_ ( __lowerCamelCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="train" , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="" , ):
super().__init__()
UpperCAmelCase__ : Any = Path(_lowerCAmelCase ).joinpath(type_path + """.source""" )
UpperCAmelCase__ : List[Any] = Path(_lowerCAmelCase ).joinpath(type_path + """.target""" )
UpperCAmelCase__ : int = self.get_char_lens(self.src_file )
UpperCAmelCase__ : Union[str, Any] = max_source_length
UpperCAmelCase__ : Optional[int] = max_target_length
assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
UpperCAmelCase__ : Optional[Any] = tokenizer
UpperCAmelCase__ : Optional[int] = prefix
if n_obs is not None:
UpperCAmelCase__ : str = self.src_lens[:n_obs]
UpperCAmelCase__ : Union[str, Any] = src_lang
UpperCAmelCase__ : int = tgt_lang
def __len__( self ):
return len(self.src_lens )
def __getitem__( self , _lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = index + 1 # linecache starts at 1
UpperCAmelCase__ : List[Any] = self.prefix + linecache.getline(str(self.src_file ) , _lowerCAmelCase ).rstrip("""\n""" )
UpperCAmelCase__ : Union[str, Any] = linecache.getline(str(self.tgt_file ) , _lowerCAmelCase ).rstrip("""\n""" )
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _lowerCAmelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
UpperCAmelCase__ : Optional[int] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _lowerCAmelCase ) else self.tokenizer
)
UpperCAmelCase__ : Optional[Any] = self.tokenizer.generator if isinstance(self.tokenizer , _lowerCAmelCase ) else self.tokenizer
UpperCAmelCase__ : Optional[Any] = encode_line(_lowerCAmelCase , _lowerCAmelCase , self.max_source_length , """right""" )
UpperCAmelCase__ : Any = encode_line(_lowerCAmelCase , _lowerCAmelCase , self.max_target_length , """right""" )
UpperCAmelCase__ : Any = source_inputs["""input_ids"""].squeeze()
UpperCAmelCase__ : Tuple = target_inputs["""input_ids"""].squeeze()
UpperCAmelCase__ : Optional[Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __UpperCAmelCase ( _lowerCAmelCase ):
return [len(_lowerCAmelCase ) for x in Path(_lowerCAmelCase ).open().readlines()]
def __UpperCAmelCase ( self , _lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = torch.stack([x["""input_ids"""] for x in batch] )
UpperCAmelCase__ : Tuple = torch.stack([x["""attention_mask"""] for x in batch] )
UpperCAmelCase__ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
UpperCAmelCase__ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _lowerCAmelCase )
else self.tokenizer.pad_token_id
)
UpperCAmelCase__ : List[Any] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _lowerCAmelCase )
else self.tokenizer.pad_token_id
)
UpperCAmelCase__ : str = trim_batch(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = trim_batch(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
SCREAMING_SNAKE_CASE__ : Optional[int] = getLogger(__name__)
def _lowerCamelCase ( __lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
return list(itertools.chain.from_iterable(__lowerCamelCase ) )
def _lowerCamelCase ( __lowerCamelCase ) -> None:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = get_git_info()
save_json(__lowerCamelCase , os.path.join(__lowerCamelCase , """git_log.json""" ) )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=4 , **__lowerCamelCase ) -> Tuple:
'''simple docstring'''
with open(__lowerCamelCase , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase , indent=__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( __lowerCamelCase ) -> int:
'''simple docstring'''
with open(__lowerCamelCase ) as f:
return json.load(__lowerCamelCase )
def _lowerCamelCase ( ) -> str:
'''simple docstring'''
UpperCAmelCase__ : Dict = git.Repo(search_parent_directories=__lowerCamelCase )
UpperCAmelCase__ : Tuple = {
"""repo_id""": str(__lowerCamelCase ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> List:
'''simple docstring'''
return list(map(__lowerCamelCase , __lowerCamelCase ) )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
'''simple docstring'''
with open(__lowerCamelCase , """wb""" ) as f:
return pickle.dump(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( __lowerCamelCase ) -> List[Any]:
'''simple docstring'''
def remove_articles(__lowerCamelCase ):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , __lowerCamelCase )
def white_space_fix(__lowerCamelCase ):
return " ".join(text.split() )
def remove_punc(__lowerCamelCase ):
UpperCAmelCase__ : int = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCamelCase ) ) ) )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = normalize_answer(__lowerCamelCase ).split()
UpperCAmelCase__ : int = normalize_answer(__lowerCamelCase ).split()
UpperCAmelCase__ : List[Any] = Counter(__lowerCamelCase ) & Counter(__lowerCamelCase )
UpperCAmelCase__ : Dict = sum(common.values() )
if num_same == 0:
return 0
UpperCAmelCase__ : Optional[Any] = 1.0 * num_same / len(__lowerCamelCase )
UpperCAmelCase__ : List[Any] = 1.0 * num_same / len(__lowerCamelCase )
UpperCAmelCase__ : str = (2 * precision * recall) / (precision + recall)
return fa
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return normalize_answer(__lowerCamelCase ) == normalize_answer(__lowerCamelCase )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
'''simple docstring'''
assert len(__lowerCamelCase ) == len(__lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = 0
for hypo, pred in zip(__lowerCamelCase , __lowerCamelCase ):
em += exact_match_score(__lowerCamelCase , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
em /= len(__lowerCamelCase )
return {"em": em}
def _lowerCamelCase ( __lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
return model_prefix.startswith("""rag""" )
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
UpperCAmelCase__ : Any = """dropout_rate"""
for p in extra_params:
if getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
if not hasattr(__lowerCamelCase , __lowerCamelCase ) and not hasattr(__lowerCamelCase , equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(__lowerCamelCase ) )
delattr(__lowerCamelCase , __lowerCamelCase )
continue
UpperCAmelCase__ : str = p if hasattr(__lowerCamelCase , __lowerCamelCase ) else equivalent_param[p]
setattr(__lowerCamelCase , __lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
delattr(__lowerCamelCase , __lowerCamelCase )
return hparams, config
| 79 |
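# Hedged sketch, independent of the obfuscated names above: the answer-comparison metric
# in that file is token-level F1 after normalization (lowercase, strip punctuation and the
# articles a/an/the). A compact re-implementation for illustration only.
import re
import string
from collections import Counter

def normalize_tokens(text: str) -> list:
    text = re.sub(r"\b(a|an|the)\b", " ", text.lower())
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    return text.split()

def token_f1(prediction: str, reference: str) -> float:
    pred_toks, ref_toks = normalize_tokens(prediction), normalize_tokens(reference)
    common = sum((Counter(pred_toks) & Counter(ref_toks)).values())
    if common == 0:
        return 0.0
    precision, recall = common / len(pred_toks), common / len(ref_toks)
    return 2 * precision * recall / (precision + recall)

print(token_f1("The cat sat on the mat.", "a cat sat on a mat"))  # 1.0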
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''image''': Image()} )
UpperCamelCase = Features({'''labels''': ClassLabel} )
UpperCamelCase = "image"
UpperCamelCase = "labels"
def lowercase__ ( self : str , _UpperCAmelCase : str ) -> Dict:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , _UpperCAmelCase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
UpperCAmelCase_ = copy.deepcopy(self )
UpperCAmelCase_ = self.label_schema.copy()
UpperCAmelCase_ = features[self.label_column]
UpperCAmelCase_ = label_schema
return task_template
@property
def lowercase__ ( self : List[str] ) -> Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 82 | 0 |
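# Hedged sketch of what the align-with-features method above does: the generic "labels"
# ClassLabel placeholder in the task template is replaced by the dataset's concrete label
# feature, so the template carries the real class names. Assumes `datasets` is installed.
from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
print(features["labels"].names)  # ['cat', 'dog'] -- this is the feature copied into label_schema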
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__lowercase = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(_lowerCAmelCase ) , torch_builtin(_lowerCAmelCase ) ) )
self.assertFalse(torch.allclose(gelu_python(_lowerCAmelCase ) , gelu_new(_lowerCAmelCase ) ) )
def _a ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__lowercase = get_activation("""gelu""" )
__lowercase = get_activation("""gelu_10""" )
__lowercase = torch_builtin(_lowerCAmelCase )
__lowercase = geluaa(_lowerCAmelCase )
__lowercase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(_lowerCAmelCase ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(_lowerCAmelCase ):
get_activation("""bogus""" )
with self.assertRaises(_lowerCAmelCase ):
get_activation(_lowerCAmelCase )
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = get_activation("""gelu""" )
__lowercase = 1
__lowercase = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(_lowerCAmelCase ):
__lowercase = acta.a
| 80 |
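# Hedged sketch: the "gelu_10" activation exercised above is GELU with its output clamped
# (to the range [-10, 10] in transformers, as the clipped-mask comparison suggests). A
# minimal stand-alone version of that idea, for illustration only.
import torch

def clipped_gelu(x: torch.Tensor, bound: float = 10.0) -> torch.Tensor:
    # plain GELU, then clamp the output into [-bound, bound]
    return torch.clamp(torch.nn.functional.gelu(x), min=-bound, max=bound)

x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
print(clipped_gelu(x))  # large positive inputs saturate at 10.0; the rest matches plain GELU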
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = generator.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = "A painting of a squirrel eating a burger "
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 82 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class a (unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : int=7 , lowerCamelCase : Dict=3 , lowerCamelCase : Optional[Any]=18 , lowerCamelCase : Optional[Any]=30 , lowerCamelCase : Any=400 , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[int]=None , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : str=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , lowerCamelCase : Optional[int]=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , lowerCamelCase : Optional[int]=True , ) -> List[Any]:
__snake_case : List[str] = size if size is not None else {"height": 224, "width": 224}
__snake_case : List[str] = crop_size if crop_size is not None else {"height": 18, "width": 18}
__snake_case : Tuple = parent
__snake_case : List[str] = batch_size
__snake_case : Union[str, Any] = num_channels
__snake_case : Union[str, Any] = image_size
__snake_case : Union[str, Any] = min_resolution
__snake_case : str = max_resolution
__snake_case : List[Any] = do_resize
__snake_case : Optional[int] = size
__snake_case : int = do_center_crop
__snake_case : Dict = crop_size
__snake_case : List[Any] = do_normalize
__snake_case : str = image_mean
__snake_case : Optional[int] = image_std
__snake_case : Union[str, Any] = do_convert_rgb
def __snake_case ( self : str ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[Any]=False , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=False ) -> Optional[int]:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__snake_case : List[str] = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
__snake_case : List[str] = []
for i in range(self.batch_size ):
__snake_case , __snake_case : Tuple = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__snake_case : str = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
if torchify:
__snake_case : Dict = [torch.from_numpy(lowerCamelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None
def __snake_case ( self : int ) -> str:
__snake_case : Optional[Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=lowerCamelCase )
@property
def __snake_case ( self : List[str] ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCamelCase , "center_crop" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_convert_rgb" ) )
def __snake_case ( self : List[Any] ) -> Dict:
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __snake_case ( self : str ) -> int:
pass
def __snake_case ( self : Optional[int] ) -> Any:
# Initialize image_processing
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : Dict = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __snake_case ( self : Optional[Any] ) -> Optional[int]:
# Initialize image_processing
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : Any = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : List[str] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __snake_case ( self : Any ) -> Dict:
# Initialize image_processing
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : str = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : Dict = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ChineseCLIPImageProcessor if is_vision_available() else None
def __snake_case ( self : Tuple ) -> Optional[int]:
__snake_case : Optional[int] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=lowerCamelCase )
__snake_case : Any = 3
@property
def __snake_case ( self : List[Any] ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : List[str] ) -> Union[str, Any]:
__snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCamelCase , "center_crop" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_convert_rgb" ) )
def __snake_case ( self : int ) -> Tuple:
pass
def __snake_case ( self : List[Any] ) -> Any:
# Initialize image_processing
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : List[Any] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 81 |
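# Hedged note on the channel-order detail in prepare_inputs above: the random numpy images
# are generated channels-first (C, H, W), while PIL expects channels-last, hence the
# np.moveaxis call before Image.fromarray. A tiny self-contained check:
import numpy as np
from PIL import Image

arr = np.random.randint(255, size=(3, 18, 18), dtype=np.uint8)  # (C, H, W)
img = Image.fromarray(np.moveaxis(arr, 0, -1))                  # PIL wants (H, W, C)
print(img.size)  # (18, 18)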
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return x if y == 0 else greatest_common_divisor(lowerCAmelCase__ , x % y )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return (x * y) // greatest_common_divisor(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ = 20 ):
UpperCAmelCase_ = 1
for i in range(1 , n + 1 ):
UpperCAmelCase_ = lcm(lowerCAmelCase__ , lowerCAmelCase__ )
return g
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 0 |
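# Hedged cross-check of the Project Euler solution above: the smallest number evenly
# divisible by every integer from 1 to 20 is the LCM of that range, computed here with
# the standard library instead of the hand-rolled gcd/lcm pair.
from functools import reduce
from math import gcd

lcm_1_to_20 = reduce(lambda a, b: a * b // gcd(a, b), range(1, 21), 1)
print(lcm_1_to_20)  # 232792560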
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
def snake_case_ ( A_ : type, A_ : Optional[str], A_ : Optional[List[str]] = None, ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
_lowerCamelCase : Optional[Any] = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
_lowerCamelCase : Dict = format_type
def snake_case_ ( A_ : Exception, A_ : Optional[str], A_ : Optional[List[str]] = None ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
_lowerCamelCase : List[Any] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
lowerCAmelCase__ = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
lowerCAmelCase__ = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
lowerCAmelCase__ = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def snake_case_ ( A_ : Optional[str] ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def snake_case_ ( A_ : Optional[str], **A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = get_format_type_from_alias(A_ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**A_ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
| 83 |
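# Hedged usage sketch of the registry above: get_formatter resolves an alias (e.g. "np")
# to its canonical format type and instantiates the formatter, or raises the stored error
# when the backing library is missing. Assumes a recent `datasets` release exposing this API.
from datasets.formatting import get_formatter

numpy_formatter = get_formatter("np")    # alias resolves to the NumPy formatter
python_formatter = get_formatter(None)   # None maps to the plain Python formatter
print(type(numpy_formatter).__name__, type(python_formatter).__name__)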
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCAmelCase_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
else:
UpperCAmelCase_ = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = ProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
UpperCAmelCase_ = ["key_proj", "value_proj", "query_proj"]
UpperCAmelCase_ = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
UpperCAmelCase_ = key.split("." )
if attributes[0] == "lm_head":
UpperCAmelCase_ = prophet
UpperCAmelCase_ = prophet_old
else:
UpperCAmelCase_ = prophet.prophetnet
UpperCAmelCase_ = prophet_old.model
UpperCAmelCase_ = False
for attribute in attributes:
if attribute in mapping:
UpperCAmelCase_ = mapping[attribute]
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = attribute
elif hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
UpperCAmelCase_ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.bias
logger.info(f"""{attribute} is initialized""" )
UpperCAmelCase_ = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase__ , "in_proj_weight" ):
UpperCAmelCase_ = old_model.in_proj_weight.shape[0] // 3
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
UpperCAmelCase_ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
UpperCAmelCase_ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
UpperCAmelCase_ = True
break
if attribute.isdigit():
UpperCAmelCase_ = model[int(lowerCAmelCase__ )]
UpperCAmelCase_ = old_model[int(lowerCAmelCase__ )]
else:
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if old_attribute == "":
UpperCAmelCase_ = old_model
else:
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 82 | 0 |
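# Hedged sketch of the central trick in the conversion above: the old attention modules
# store query/key/value as one stacked `in_proj_weight` of shape (3 * embed_dim, embed_dim),
# and the converter slices it into three separate projections. Illustration only.
import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)

q_weight = in_proj_weight[:embed_dim, :]
k_weight = in_proj_weight[embed_dim : 2 * embed_dim, :]
v_weight = in_proj_weight[2 * embed_dim :, :]
assert q_weight.shape == k_weight.shape == v_weight.shape == (embed_dim, embed_dim)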
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''artists_file''': '''artists.json''',
'''lyrics_file''': '''lyrics.json''',
'''genres_file''': '''genres.json''',
}
UpperCAmelCase = {
'''artists_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
},
'''genres_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
},
'''lyrics_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
},
}
UpperCAmelCase = {
'''jukebox''': 512,
}
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[str] = PRETRAINED_LYRIC_TOKENS_SIZES
_UpperCamelCase : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case , snake_case , snake_case , snake_case=["v3", "v2", "v2"] , snake_case=512 , snake_case=5 , snake_case="<|endoftext|>" , **snake_case , ):
lowercase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else unk_token
super().__init__(
unk_token=snake_case , n_genres=snake_case , version=snake_case , max_n_lyric_tokens=snake_case , **snake_case , )
lowercase = version
lowercase = max_n_lyric_tokens
lowercase = n_genres
with open(snake_case , encoding='utf-8' ) as vocab_handle:
lowercase = json.load(snake_case )
with open(snake_case , encoding='utf-8' ) as vocab_handle:
lowercase = json.load(snake_case )
with open(snake_case , encoding='utf-8' ) as vocab_handle:
lowercase = json.load(snake_case )
lowercase = r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2 the character vocabulary had n_vocab=80; v3 is missing '+', so it has n_vocab=79 characters.
if len(self.lyrics_encoder ) == 79:
lowercase = oov.replace(r'\-\'' , r'\-+\'' )
lowercase = regex.compile(snake_case )
lowercase = {v: k for k, v in self.artists_encoder.items()}
lowercase = {v: k for k, v in self.genres_encoder.items()}
lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def SCREAMING_SNAKE_CASE__ ( self ):
return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = [self.artists_encoder.get(snake_case , 0 ) for artist in list_artists]
for genres in range(len(snake_case ) ):
lowercase = [self.genres_encoder.get(snake_case , 0 ) for genre in list_genres[genres]]
lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
lowercase = [[self.lyrics_encoder.get(snake_case , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return list(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , **snake_case ):
lowercase , lowercase , lowercase = self.prepare_for_tokenization(snake_case , snake_case , snake_case )
lowercase = self._tokenize(snake_case )
return artist, genre, lyrics
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
lowercase = artists[idx].lower()
lowercase = [genres[idx].lower()]
else:
lowercase = self._normalize(artists[idx] ) + '.v2'
lowercase = [
self._normalize(snake_case ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
lowercase = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
lowercase = {vocab[index]: index + 1 for index in range(len(snake_case ) )}
lowercase = 0
lowercase = len(snake_case ) + 1
lowercase = self.vocab
lowercase = {v: k for k, v in self.vocab.items()}
lowercase = ''
else:
lowercase = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
lowercase = self._run_strip_accents(snake_case )
lowercase = lyrics.replace('\\' , '\n' )
lowercase = self.out_of_vocab.sub('' , snake_case ), [], []
return artists, genres, lyrics
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = unicodedata.normalize('NFD' , snake_case )
lowercase = []
for char in text:
lowercase = unicodedata.category(snake_case )
if cat == "Mn":
continue
output.append(snake_case )
return "".join(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = (
[chr(snake_case ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(snake_case ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(snake_case ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
lowercase = frozenset(snake_case )
lowercase = re.compile(r'_+' )
lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
lowercase = pattern.sub('_' , snake_case ).strip('_' )
return text
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return " ".join(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = False ):
# Convert to TensorType
if not isinstance(snake_case , snake_case ):
lowercase = TensorType(snake_case )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
lowercase = tf.constant
lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
lowercase = torch.tensor
lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
lowercase = jnp.array
lowercase = _is_jax
else:
lowercase = np.asarray
lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
lowercase = [inputs]
if not is_tensor(snake_case ):
lowercase = as_tensor(snake_case )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self , snake_case , snake_case , snake_case="" , snake_case="pt" ):
lowercase = [0, 0, 0]
lowercase = [artist] * len(self.version )
lowercase = [genres] * len(self.version )
lowercase , lowercase , lowercase = self.tokenize(snake_case , snake_case , snake_case )
lowercase , lowercase , lowercase = self._convert_token_to_id(snake_case , snake_case , snake_case )
lowercase = [-INFINITY] * len(full_tokens[-1] )
lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=snake_case )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
if not os.path.isdir(snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=snake_case ) )
lowercase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=snake_case ) )
lowercase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=snake_case ) )
return (artists_file, genres_file, lyrics_file)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = self.artists_decoder.get(snake_case )
lowercase = [self.genres_decoder.get(snake_case ) for genre in genres_index]
lowercase = [self.lyrics_decoder.get(snake_case ) for character in lyric_index]
return artist, genres, lyrics
| 84 |
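# Hedged sketch of the accent-stripping step in the tokenizer above: NFD-decompose the
# text, then drop combining marks (unicodedata category "Mn"). Standard library only.
import unicodedata

def strip_accents(text: str) -> str:
    return "".join(
        ch for ch in unicodedata.normalize("NFD", text) if unicodedata.category(ch) != "Mn"
    )

print(strip_accents("Beyoncé"))  # Beyonce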
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(lowerCAmelCase__ )
for i in range(n - 1 ):
for j in range(i + 1 , lowerCAmelCase__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def a__ ( lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) <= 1:
return arr, 0
UpperCAmelCase_ = len(lowerCAmelCase__ ) // 2
UpperCAmelCase_ = arr[0:mid]
UpperCAmelCase_ = arr[mid:]
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = _count_cross_inversions(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = []
UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = 0
while i < len(lowerCAmelCase__ ) and j < len(lowerCAmelCase__ ):
if p[i] > q[j]:
# if p[i] > q[j], then every remaining element of the sorted half p
# also exceeds q[j], so all len(p) - i of those pairs are inversions.
num_inversion += len(lowerCAmelCase__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowerCAmelCase__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def a__ ( ):
UpperCAmelCase_ = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , lowerCAmelCase__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowerCAmelCase__ )
# an empty list should also have zero inversions
UpperCAmelCase_ = []
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 82 | 0 |
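# Hedged cross-check of the two counters above: the nested-loop version is O(n^2), while
# the divide-and-conquer version counts cross inversions during the merge and runs in
# O(n log n). An independent brute-force count for the sample array used in main():
from itertools import combinations

sample = [10, 2, 1, 5, 5, 2, 11]
inversions = sum(1 for (i, x), (j, y) in combinations(enumerate(sample), 2) if x > y)
print(inversions)  # 8, matching the comment in the file above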
def _a ( lowercase__ : str ):
'''simple docstring'''
if n_term == "":
return []
SCREAMING_SNAKE_CASE__ : list = []
for temp in range(int(lowercase__ ) ):
series.append(f'''1/{temp + 1}''' if series else '1' )
return series
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
| 85 |
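# Hedged worked example for the generator above: the first four harmonic terms are
# 1, 1/2, 1/3 and 1/4, whose partial sum is 25/12 (about 2.0833).
from fractions import Fraction

partial_sum = sum(Fraction(1, k) for k in range(1, 5))
print(partial_sum, float(partial_sum))  # 25/12 2.0833...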
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
if number > 0:
raise ValueError("input must be a negative integer" )
UpperCAmelCase_ = len(bin(lowerCAmelCase__ )[3:] )
UpperCAmelCase_ = bin(abs(lowerCAmelCase__ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase_ = (
(
"1"
+ "0" * (binary_number_length - len(lowerCAmelCase__ ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
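# Hedged trace of the function above for -5:
#   bin(-5)[3:] is '101', so the magnitude needs 3 bits
#   abs(-5) - (1 << 3) == -3, and bin(-3)[3:] == '11'
#   result: '1' + '0' * (3 - 2) + '11' == '1011', returned as '0b1011'
# which matches 16 - 5 == 11 == 0b1011, the 4-bit two's complement of -5.
assert bin(16 - 5) == "0b1011"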
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _a ( yaml.SafeLoader ):
"""simple docstring"""
def __A ( self : Optional[Any] , UpperCAmelCase : int ):
A_ = [self.constructed_objects[key_node] for key_node, _ in node.value]
A_ = [tuple(UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else key for key in keys]
A_ = Counter(UpperCAmelCase )
A_ = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def __A ( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any]=False ):
A_ = super().construct_mapping(UpperCAmelCase , deep=UpperCAmelCase )
self._check_no_duplicates_on_constructed_node(UpperCAmelCase )
return mapping
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
A_ = full_content[1:].index("---" ) + 1
A_ = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(__UpperCamelCase )
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Any = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def __A ( cls : Dict , UpperCAmelCase : Path ):
with open(UpperCAmelCase , encoding="utf-8" ) as readme_file:
A_ , A_ = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(UpperCAmelCase )
else:
return cls()
def __A ( self : Optional[int] , UpperCAmelCase : Path ):
if path.exists():
with open(UpperCAmelCase , encoding="utf-8" ) as readme_file:
A_ = readme_file.read()
else:
A_ = None
A_ = self._to_readme(UpperCAmelCase )
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as readme_file:
readme_file.write(UpperCAmelCase )
def __A ( self : Union[str, Any] , UpperCAmelCase : Optional[str] = None ):
if readme_content is not None:
A_ , A_ = _split_yaml_from_readme(UpperCAmelCase )
A_ = "---\n" + self.to_yaml_string() + "---\n" + content
else:
A_ = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def __A ( cls : Tuple , UpperCAmelCase : str ):
A_ = yaml.load(UpperCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
A_ = {
(key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**UpperCAmelCase )
def __A ( self : List[Any] ):
return yaml.safe_dump(
{
(key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=UpperCAmelCase , allow_unicode=UpperCAmelCase , encoding="utf-8" , ).decode("utf-8" )
__a :Optional[Any] = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__a :Optional[int] = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
__a :str = ap.parse_args()
__a :Optional[Any] = Path(args.readme_filepath)
__a :Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath) | 86 |
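# Hedged sketch of the front-matter convention handled above: a dataset README keeps its
# YAML metadata between two '---' lines at the top, and everything after the second '---'
# is the markdown body, which is exactly the split _split_yaml_from_readme performs.
readme = """---
license: apache-2.0
task_categories:
- text-classification
---
# My dataset

Some description.
"""
lines = readme.splitlines()
sep_idx = lines[1:].index("---") + 1
yaml_block = "\n".join(lines[1:sep_idx])
markdown_body = "\n".join(lines[sep_idx + 1 :])
print(yaml_block)
print(markdown_body)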
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] , **_UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , "vision" )
self.check_model_type(_UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , _UpperCAmelCase : Union[str, List[str]] = None , **_UpperCAmelCase : Optional[int] , ) -> List[Any]:
'''simple docstring'''
if "text_queries" in kwargs:
UpperCAmelCase_ = kwargs.pop("text_queries" )
if isinstance(_UpperCAmelCase , (str, Image.Image) ):
UpperCAmelCase_ = {"image": image, "candidate_labels": candidate_labels}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
return results
def lowercase__ ( self : str , **_UpperCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = {}
if "threshold" in kwargs:
UpperCAmelCase_ = kwargs["threshold"]
if "top_k" in kwargs:
UpperCAmelCase_ = kwargs["top_k"]
return {}, {}, postprocess_params
def lowercase__ ( self : int , _UpperCAmelCase : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = load_image(inputs["image"] )
UpperCAmelCase_ = inputs["candidate_labels"]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase_ = candidate_labels.split("," )
UpperCAmelCase_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(_UpperCAmelCase ):
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.image_processor(_UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(_UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def lowercase__ ( self : int , _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = model_inputs.pop("target_size" )
UpperCAmelCase_ = model_inputs.pop("candidate_label" )
UpperCAmelCase_ = model_inputs.pop("is_last" )
UpperCAmelCase_ = self.model(**_UpperCAmelCase )
UpperCAmelCase_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def lowercase__ ( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : List[str]=None ) -> int:
'''simple docstring'''
UpperCAmelCase_ = []
for model_output in model_outputs:
UpperCAmelCase_ = model_output["candidate_label"]
UpperCAmelCase_ = BaseModelOutput(_UpperCAmelCase )
UpperCAmelCase_ = self.image_processor.post_process_object_detection(
outputs=_UpperCAmelCase , threshold=_UpperCAmelCase , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
UpperCAmelCase_ = outputs["scores"][index].item()
UpperCAmelCase_ = self._get_bounding_box(outputs["boxes"][index][0] )
UpperCAmelCase_ = {"score": score, "label": label, "box": box}
results.append(_UpperCAmelCase )
UpperCAmelCase_ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )
if top_k:
UpperCAmelCase_ = results[:top_k]
return results
def lowercase__ ( self : str , _UpperCAmelCase : "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = box.int().tolist()
UpperCAmelCase_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
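# Illustrative usage sketch (an addition for clarity, not part of the pipeline class above).
# It shows how a zero-shot object detection pipeline like this one is normally driven via the
# high-level `pipeline` factory; the OWL-ViT checkpoint, image URL and candidate labels are
# example values chosen here, not taken from this file.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control", "couch"],
    threshold=0.1,
)
for prediction in predictions:
    # each result holds the matched label, its score and a pixel bounding box (xmin/ymin/xmax/ymax)
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])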
| 82 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : Tuple = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
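# Minimal sketch of the lazy-import pattern used above (an illustrative assumption, not
# transformers' actual `_LazyModule`): heavy submodules are imported only when one of their
# attributes is first accessed. The demo mapping onto `math` and `json` is hypothetical.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {module_name: [attribute, ...]} into {attribute: module_name}
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._attr_to_module[attr])  # imported lazily here
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value


_demo = _TinyLazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
print(_demo.sqrt(2.0))            # math is imported only at this point
print(_demo.dumps({"ok": True}))  # json is imported only at this point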
| 87 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=30 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : str=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : List[Any]=10 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Dict=None , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : int ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = TFViTModel(config=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
UpperCAmelCase_ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowercase__ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
UpperCAmelCase_ = self.image_size // 2
UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = TFViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , tf.keras.layers.Layer ) )
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowercase__ ( self : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 )
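# Illustrative follow-up sketch (an addition, not one of the tests above): turning the raw
# ImageNet logits checked in the slow test into a readable label. The checkpoint and the
# fixture image path mirror the ones already used in this file.
from PIL import Image
import tensorflow as tf
from transformers import TFViTForImageClassification, ViTImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits                       # shape (1, 1000)
predicted_class = int(tf.argmax(logits, axis=-1)[0])  # index of the highest-scoring class
print(model.config.id2label[predicted_class])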
| 82 | 0 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class lowercase__ ( A_ ):
__UpperCAmelCase = 42
__UpperCAmelCase = jnp.floataa
__UpperCAmelCase = True
def UpperCamelCase_ ( self) -> str:
super().setup()
_lowerCamelCase : Optional[int] = nn.Dense(5 , dtype=self.dtype)
def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : Optional[int] = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[int] = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class lowercase__ ( A_ ):
__UpperCAmelCase = FlaxBigBirdForNaturalQuestionsModule
def _snake_case ( __snake_case : Tuple , __snake_case : str , __snake_case : List[str] , __snake_case : Dict , __snake_case : Tuple , __snake_case : List[Any] ):
"""simple docstring"""
def cross_entropy(__snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[int]=None ):
_lowerCamelCase : Optional[int] = logits.shape[-1]
_lowerCamelCase : Any = (labels[..., None] == jnp.arange(__snake_case )[None]).astype("""f4""" )
_lowerCamelCase : Any = jax.nn.log_softmax(__snake_case , axis=-1 )
_lowerCamelCase : Any = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
_lowerCamelCase : int = reduction(__snake_case )
return loss
_lowerCamelCase : int = partial(__snake_case , reduction=jnp.mean )
_lowerCamelCase : int = cross_entropy(__snake_case , __snake_case )
_lowerCamelCase : int = cross_entropy(__snake_case , __snake_case )
_lowerCamelCase : List[str] = cross_entropy(__snake_case , __snake_case )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class lowercase__ :
__UpperCAmelCase = "google/bigbird-roberta-base"
__UpperCAmelCase = 3000
__UpperCAmelCase = 10500
__UpperCAmelCase = 128
__UpperCAmelCase = 3
__UpperCAmelCase = 1
__UpperCAmelCase = 5
# tx_args
__UpperCAmelCase = 3e-5
__UpperCAmelCase = 0.0
__UpperCAmelCase = 20000
__UpperCAmelCase = 0.0_0_9_5
__UpperCAmelCase = "bigbird-roberta-natural-questions"
__UpperCAmelCase = "training-expt"
__UpperCAmelCase = "data/nq-training.jsonl"
__UpperCAmelCase = "data/nq-validation.jsonl"
def UpperCamelCase_ ( self) -> Optional[int]:
os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Dict = os.path.join(self.base_dir , self.save_dir)
_lowerCamelCase : Tuple = self.batch_size_per_device * jax.device_count()
@dataclass
class lowercase__ :
__UpperCAmelCase = 42
__UpperCAmelCase = 4096 # no dynamic padding on TPUs
def __call__( self , SCREAMING_SNAKE_CASE) -> Optional[int]:
_lowerCamelCase : Dict = self.collate_fn(SCREAMING_SNAKE_CASE)
_lowerCamelCase : int = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
return batch
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]:
_lowerCamelCase , _lowerCamelCase : Any = self.fetch_inputs(features["""input_ids"""])
_lowerCamelCase : Dict = {
"""input_ids""": jnp.array(SCREAMING_SNAKE_CASE , dtype=jnp.intaa),
"""attention_mask""": jnp.array(SCREAMING_SNAKE_CASE , dtype=jnp.intaa),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa),
}
return batch
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : List[str] = [self._fetch_inputs(SCREAMING_SNAKE_CASE) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Dict:
_lowerCamelCase : List[Any] = [1 for _ in range(len(SCREAMING_SNAKE_CASE))]
while len(SCREAMING_SNAKE_CASE) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def _snake_case ( __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Union[str, Any]=None ):
"""simple docstring"""
if seed is not None:
_lowerCamelCase : Optional[Any] = dataset.shuffle(seed=__snake_case )
for i in range(len(__snake_case ) // batch_size ):
_lowerCamelCase : int = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(__snake_case )
@partial(jax.pmap , axis_name="""batch""" )
def _snake_case ( __snake_case : Any , __snake_case : List[str] , **__snake_case : Tuple ):
"""simple docstring"""
def loss_fn(__snake_case : str ):
_lowerCamelCase : List[Any] = model_inputs.pop("""start_labels""" )
_lowerCamelCase : Optional[Any] = model_inputs.pop("""end_labels""" )
_lowerCamelCase : List[str] = model_inputs.pop("""pooled_labels""" )
_lowerCamelCase : Optional[Any] = state.apply_fn(**__snake_case , params=__snake_case , dropout_rng=__snake_case , train=__snake_case )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = outputs
return state.loss_fn(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
_lowerCamelCase , _lowerCamelCase : List[Any] = jax.random.split(__snake_case )
_lowerCamelCase : Any = jax.value_and_grad(__snake_case )
_lowerCamelCase , _lowerCamelCase : Any = grad_fn(state.params )
_lowerCamelCase : List[str] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
_lowerCamelCase : Tuple = jax.lax.pmean(__snake_case , """batch""" )
_lowerCamelCase : int = state.apply_gradients(grads=__snake_case )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def _snake_case ( __snake_case : Any , **__snake_case : int ):
"""simple docstring"""
_lowerCamelCase : Any = model_inputs.pop("""start_labels""" )
_lowerCamelCase : Any = model_inputs.pop("""end_labels""" )
_lowerCamelCase : Any = model_inputs.pop("""pooled_labels""" )
_lowerCamelCase : Optional[Any] = state.apply_fn(**__snake_case , params=state.params , train=__snake_case )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = outputs
_lowerCamelCase : Tuple = state.loss_fn(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
_lowerCamelCase : str = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class lowercase__ ( train_state.TrainState ):
__UpperCAmelCase = struct.field(pytree_node=A_ )
@dataclass
class lowercase__ :
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = None
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None) -> Optional[int]:
_lowerCamelCase : Tuple = model.params
_lowerCamelCase : Any = TrainState.create(
apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE , tx=SCREAMING_SNAKE_CASE , loss_fn=SCREAMING_SNAKE_CASE , )
if ckpt_dir is not None:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = restore_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
_lowerCamelCase , _lowerCamelCase : Optional[Any] = build_tx(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = train_state.TrainState(
step=SCREAMING_SNAKE_CASE , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE , tx=SCREAMING_SNAKE_CASE , opt_state=SCREAMING_SNAKE_CASE , )
_lowerCamelCase : List[Any] = args
_lowerCamelCase : Optional[Any] = data_collator
_lowerCamelCase : Optional[Any] = lr
_lowerCamelCase : Optional[Any] = params
_lowerCamelCase : Optional[Any] = jax_utils.replicate(SCREAMING_SNAKE_CASE)
return state
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Union[str, Any]:
_lowerCamelCase : Dict = self.args
_lowerCamelCase : List[str] = len(SCREAMING_SNAKE_CASE) // args.batch_size
_lowerCamelCase : Optional[Any] = jax.random.PRNGKey(0)
_lowerCamelCase : List[Any] = jax.random.split(SCREAMING_SNAKE_CASE , jax.device_count())
for epoch in range(args.max_epochs):
_lowerCamelCase : Tuple = jnp.array(0 , dtype=jnp.floataa)
_lowerCamelCase : Tuple = get_batched_dataset(SCREAMING_SNAKE_CASE , args.batch_size , seed=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Dict = 0
for batch in tqdm(SCREAMING_SNAKE_CASE , total=SCREAMING_SNAKE_CASE , desc=F'Running EPOCH-{epoch}'):
_lowerCamelCase : Union[str, Any] = self.data_collator(SCREAMING_SNAKE_CASE)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.train_step_fn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
if i % args.logging_steps == 0:
_lowerCamelCase : Optional[int] = jax_utils.unreplicate(state.step)
_lowerCamelCase : int = running_loss.item() / i
_lowerCamelCase : Tuple = self.scheduler_fn(state_step - 1)
_lowerCamelCase : Optional[Any] = self.evaluate(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[int] = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE))
self.logger.log(SCREAMING_SNAKE_CASE , commit=SCREAMING_SNAKE_CASE)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[Any]:
_lowerCamelCase : Optional[int] = get_batched_dataset(SCREAMING_SNAKE_CASE , self.args.batch_size)
_lowerCamelCase : Dict = len(SCREAMING_SNAKE_CASE) // self.args.batch_size
_lowerCamelCase : str = jnp.array(0 , dtype=jnp.floataa)
_lowerCamelCase : int = 0
for batch in tqdm(SCREAMING_SNAKE_CASE , total=SCREAMING_SNAKE_CASE , desc="""Evaluating ... """):
_lowerCamelCase : int = self.data_collator(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Dict = self.val_step_fn(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
return running_loss / i
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]:
_lowerCamelCase : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE)
print(F'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """)
self.model_save_fn(SCREAMING_SNAKE_CASE , params=state.params)
with open(os.path.join(SCREAMING_SNAKE_CASE , """opt_state.msgpack""") , """wb""") as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE , """args.joblib"""))
joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE , """data_collator.joblib"""))
with open(os.path.join(SCREAMING_SNAKE_CASE , """training_state.json""") , """w""") as f:
json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE)
print("""DONE""")
def _snake_case ( __snake_case : Any , __snake_case : Any ):
"""simple docstring"""
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ )
with open(os.path.join(__snake_case , """flax_model.msgpack""" ) , """rb""" ) as f:
_lowerCamelCase : Dict = from_bytes(state.params , f.read() )
with open(os.path.join(__snake_case , """opt_state.msgpack""" ) , """rb""" ) as f:
_lowerCamelCase : str = from_bytes(state.opt_state , f.read() )
_lowerCamelCase : Optional[Any] = joblib.load(os.path.join(__snake_case , """args.joblib""" ) )
_lowerCamelCase : Optional[int] = joblib.load(os.path.join(__snake_case , """data_collator.joblib""" ) )
with open(os.path.join(__snake_case , """training_state.json""" ) , """r""" ) as f:
_lowerCamelCase : str = json.load(__snake_case )
_lowerCamelCase : Tuple = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def _snake_case ( __snake_case : List[Any] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = num_train_steps - warmup_steps
_lowerCamelCase : Optional[int] = optax.linear_schedule(init_value=__snake_case , end_value=__snake_case , transition_steps=__snake_case )
_lowerCamelCase : List[str] = optax.linear_schedule(init_value=__snake_case , end_value=1E-7 , transition_steps=__snake_case )
_lowerCamelCase : Optional[Any] = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def _snake_case ( __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Dict ):
"""simple docstring"""
def weight_decay_mask(__snake_case : Union[str, Any] ):
_lowerCamelCase : Any = traverse_util.flatten_dict(__snake_case )
_lowerCamelCase : List[Any] = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(__snake_case )
_lowerCamelCase : List[str] = scheduler_fn(__snake_case , __snake_case , __snake_case , __snake_case )
_lowerCamelCase : str = optax.adamw(learning_rate=__snake_case , weight_decay=__snake_case , mask=__snake_case )
return tx, lr
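# Small numeric sketch (an illustrative addition, independent of the trainer above) of the
# warmup-then-decay schedule assembled with optax.linear_schedule / optax.join_schedules:
# the learning rate ramps from init_lr to lr over the warmup steps, then decays linearly
# for the remaining steps, the same shape the schedule builder above produces.
import optax

init_lr, lr = 0.0, 3e-5
warmup_steps, num_train_steps = 10, 50

warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
decay_fn = optax.linear_schedule(
    init_value=lr, end_value=1e-7, transition_steps=num_train_steps - warmup_steps
)
schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])

for step in (0, 5, 10, 30, 50):
    print(step, float(schedule(step)))  # rises until step 10, then falls off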
| 88 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
lowerCamelCase = {
"""facebook/bart-base""": 1_024,
"""facebook/bart-large""": 1_024,
"""facebook/bart-large-mnli""": 1_024,
"""facebook/bart-large-cnn""": 1_024,
"""facebook/bart-large-xsum""": 1_024,
"""yjernite/bart_eli5""": 1_024,
}
@lru_cache()
def a__ ( ):
UpperCAmelCase_ = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
UpperCAmelCase_ = bs[:]
UpperCAmelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCAmelCase__ )
cs.append(2**8 + n )
n += 1
UpperCAmelCase_ = [chr(lowerCAmelCase__ ) for n in cs]
return dict(zip(lowerCAmelCase__ , lowerCAmelCase__ ) )
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = set()
UpperCAmelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ = char
return pairs
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]="replace" , _UpperCAmelCase : Any="<s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : Optional[int]="<unk>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : List[Any]="<mask>" , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : Dict , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ = json.load(_UpperCAmelCase )
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ = errors # how to handle errors in decoding
UpperCAmelCase_ = bytes_to_unicode()
UpperCAmelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding="utf-8" ) as merges_handle:
UpperCAmelCase_ = merges_handle.read().split("\n" )[1:-1]
UpperCAmelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
UpperCAmelCase_ = {}
UpperCAmelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
return len(self.encoder )
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ = tuple(_UpperCAmelCase )
UpperCAmelCase_ = get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
UpperCAmelCase_ = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ = bigram
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
while i < len(_UpperCAmelCase ):
try:
UpperCAmelCase_ = word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ = j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ = tuple(_UpperCAmelCase )
UpperCAmelCase_ = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
UpperCAmelCase_ = get_pairs(_UpperCAmelCase )
UpperCAmelCase_ = " ".join(_UpperCAmelCase )
UpperCAmelCase_ = word
return word
def lowercase__ ( self : Dict , _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = []
for token in re.findall(self.pat , _UpperCAmelCase ):
UpperCAmelCase_ = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(" " ) )
return bpe_tokens
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "".join(_UpperCAmelCase )
UpperCAmelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
_UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + "\n" )
UpperCAmelCase_ = 0
with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(" ".join(_UpperCAmelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowercase__ ( self : str , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
UpperCAmelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def lowercase__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()):
UpperCAmelCase_ = " " + text
return (text, kwargs)
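# Illustrative usage sketch (an addition, not part of the tokenizer class above) showing the
# byte-level BPE behaviour implemented here, using the published facebook/bart-base checkpoint.
# "Ġ" in the byte-level tokens marks a word-initial space.
from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
print(tokenizer.tokenize("Hello world"))   # e.g. ['Hello', 'Ġworld']
print(tokenizer.tokenize(" Hello world"))  # e.g. ['ĠHello', 'Ġworld'] - the leading space changes the tokens
encoded = tokenizer("Hello world")
print(encoded["input_ids"])                # wrapped in <s> (id 0) ... </s> (id 2)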
| 82 | 0 |
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE : str = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
SCREAMING_SNAKE_CASE : Dict = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
SCREAMING_SNAKE_CASE : Union[str, Any] = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCamelCase( datasets.Metric ):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence'),
'references': datasets.Value('string', id='sequence'),
}), reference_urls=[], )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, ) -> Any:
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
_lowercase : List[str] = np.array([re.sub(lowerCamelCase, '', lowerCamelCase) for x in predictions])
_lowercase : Optional[Any] = np.array([re.sub(lowerCamelCase, '', lowerCamelCase) for x in references])
else:
_lowercase : Dict = np.asarray(lowerCamelCase)
_lowercase : Dict = np.asarray(lowerCamelCase)
if ignore_case:
_lowercase : Union[str, Any] = np.char.lower(lowerCamelCase)
_lowercase : List[str] = np.char.lower(lowerCamelCase)
if ignore_punctuation:
_lowercase : Optional[Any] = string.punctuation.maketrans('', '', string.punctuation)
_lowercase : Any = np.char.translate(lowerCamelCase, table=lowerCamelCase)
_lowercase : Optional[int] = np.char.translate(lowerCamelCase, table=lowerCamelCase)
if ignore_numbers:
_lowercase : List[str] = string.digits.maketrans('', '', string.digits)
_lowercase : int = np.char.translate(lowerCamelCase, table=lowerCamelCase)
_lowercase : List[str] = np.char.translate(lowerCamelCase, table=lowerCamelCase)
_lowercase : str = predictions == references
return {"exact_match": np.mean(lowerCamelCase) * 1_00}
| 89 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCamelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCamelCase = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = 0.0
for i, j in zip(_UpperCAmelCase , _UpperCAmelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(_UpperCAmelCase , _UpperCAmelCase ) else 0.0
UpperCAmelCase_ = n_correct / len(_UpperCAmelCase )
return {
"accuracy": accuracy,
}
| 82 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def _snake_case ( A , A ) -> float:
lowerCAmelCase__ = u
for i in range(1 , A ):
lowerCAmelCase__ = temp * (u - i)
return temp
def _snake_case ( ) -> None:
    lowerCAmelCase__ = int(input('''enter the number of values: ''' ) )
lowerCAmelCase__ = []
for _ in range(A ):
y.append([] )
for i in range(A ):
for j in range(A ):
y[i].append(A )
lowerCAmelCase__ = 0
print('''enter the values of parameters in a list: ''' )
lowerCAmelCase__ = list(map(A , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(A ):
lowerCAmelCase__ = float(input() )
lowerCAmelCase__ = int(input('''enter the value to interpolate: ''' ) )
lowerCAmelCase__ = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , A ):
for j in range(n - i ):
lowerCAmelCase__ = y[j + 1][i - 1] - y[j][i - 1]
lowerCAmelCase__ = y[0][0]
for i in range(1 , A ):
summ += (ucal(A , A ) * y[0][i]) / math.factorial(A )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main() | 90 |
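# Non-interactive sketch of the same Newton forward-difference interpolation (an illustrative
# addition; the sample points for f(x) = x**2 are assumptions chosen so the arithmetic of the
# u-term and the forward difference table is easy to follow without stdin input).
import math


def newton_forward_interpolate(x, y_values, value):
    """Interpolate f(value) from equally spaced nodes x with y_values = f(x)."""
    n = len(x)
    # forward difference table: diff[i][j] is the j-th forward difference at index i
    diff = [[0.0] * n for _ in range(n)]
    for i in range(n):
        diff[i][0] = y_values[i]
    for j in range(1, n):
        for i in range(n - j):
            diff[i][j] = diff[i + 1][j - 1] - diff[i][j - 1]

    u = (value - x[0]) / (x[1] - x[0])
    result = diff[0][0]
    u_term = 1.0
    for j in range(1, n):
        u_term *= u - (j - 1)  # builds u * (u - 1) * ... * (u - j + 1)
        result += u_term * diff[0][j] / math.factorial(j)
    return result


xs = [1.0, 2.0, 3.0, 4.0]
ys = [1.0, 4.0, 9.0, 16.0]  # f(x) = x**2
print(newton_forward_interpolate(xs, ys, 2.5))  # 6.25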
"""simple docstring"""
lowerCamelCase = """Alexander Joslin"""
import operator as op
from .stack import Stack
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
UpperCAmelCase_ = Stack()
UpperCAmelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowerCAmelCase__ ) )
elif i in operators:
# RULE 2
operator_stack.push(lowerCAmelCase__ )
elif i == ")":
# RULE 4
UpperCAmelCase_ = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operators[opr](lowerCAmelCase__ , lowerCAmelCase__ )
operand_stack.push(lowerCAmelCase__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 0 |
"""simple docstring"""
def _snake_case ( snake_case__ : int ):
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod() | 91 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = int(number**0.5 )
return number == sq * sq
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCAmelCase_ = x_den * y_den * z_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
top //= hcf
bottom //= hcf
return top, bottom
def a__ ( lowerCAmelCase__ = 35 ):
UpperCAmelCase_ = set()
UpperCAmelCase_ = 42
UpperCAmelCase_ = Fraction(0 )
UpperCAmelCase_ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCAmelCase_ = x_num * y_den + x_den * y_num
UpperCAmelCase_ = x_den * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCAmelCase_ = x_den * x_den * y_den * y_den
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=-1
UpperCAmelCase_ = x_num * y_num
UpperCAmelCase_ = x_den * y_num + x_num * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = x_num * x_num * y_num * y_num
UpperCAmelCase_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
for num, den in unique_s:
total += Fraction(lowerCAmelCase__ , lowerCAmelCase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline
lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Any =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase : Optional[int] =PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
torch.manual_seed(0 )
lowercase : Union[str, Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : Optional[int] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase : int =CLIPTextModel(UpperCAmelCase__ )
lowercase : List[Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : List[str] ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int]=0 ):
'''simple docstring'''
lowercase : Optional[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase : str =image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase : Any =Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert('''RGB''' )
if str(UpperCAmelCase__ ).startswith('''mps''' ):
lowercase : Dict =torch.manual_seed(UpperCAmelCase__ )
else:
lowercase : List[Any] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase : Dict ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Tuple ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : str =self.get_dummy_components()
lowercase : str =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Tuple =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Any =sd_pipe(**UpperCAmelCase__ ).images
lowercase : int =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : Union[str, Any] =np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Tuple =self.get_dummy_components()
lowercase : Optional[int] =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Tuple ='''french fries'''
lowercase : Dict =sd_pipe(**UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
lowercase : List[str] =output.images
lowercase : Tuple =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : Any =np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : List[str] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : str =self.get_dummy_components()
lowercase : Any =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Any =[inputs['''prompt''']] * 2
lowercase : int =np.array(inputs['''image'''] ).astype(np.floataa ) / 2_55.0
lowercase : Tuple =torch.from_numpy(UpperCAmelCase__ ).unsqueeze(0 ).to(UpperCAmelCase__ )
lowercase : List[Any] =image / 2 + 0.5
lowercase : List[str] =image.permute(0 , 3 , 1 , 2 )
lowercase : List[str] =image.repeat(2 , 1 , 1 , 1 )
lowercase : List[Any] =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Optional[int] =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase : Union[str, Any] =np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Any ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Union[str, Any] =self.get_dummy_components()
lowercase : Any =EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' )
lowercase : Any =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : Dict =self.get_dummy_inputs(UpperCAmelCase__ )
lowercase : Any =sd_pipe(**UpperCAmelCase__ ).images
lowercase : Any =image[0, -3:, -3:, -1]
        lowercase : List[str] =[str(round(x , 4 ) ) for x in image_slice.flatten().tolist()]
        print(''','''.join(lowercase ) )
assert image.shape == (1, 32, 32, 3)
lowercase : Dict =np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Optional[int] =self.get_dummy_components()
lowercase : Union[str, Any] =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
lowercase : Union[str, Any] =VaeImageProcessor(do_resize=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ )
lowercase : str =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowercase : List[str] =pipe(**self.get_dummy_inputs_by_type(UpperCAmelCase__ , input_image_type='''pt''' ) )[0]
lowercase : List[str] =components['''vae''']
lowercase : str =self.get_dummy_inputs_by_type(UpperCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase : List[str] =vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase : List[Any] =pipe(**UpperCAmelCase__ )[0]
lowercase : Optional[int] =np.abs(out - out_latents_inputs ).max()
        self.assertLess(UpperCAmelCase__ , 1E-4 , '''passing latents as image input generates a different result from passing an image''' )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str=0 ):
'''simple docstring'''
lowercase : Any =torch.manual_seed(UpperCAmelCase__ )
lowercase : List[Any] =load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
lowercase : Optional[int] ={
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Optional[Any] =self.get_inputs()
lowercase : Any =pipe(**UpperCAmelCase__ ).images
lowercase : Union[str, Any] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : Any =np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : Union[str, Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ )
lowercase : int =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Tuple =self.get_inputs()
lowercase : Optional[Any] =pipe(**UpperCAmelCase__ ).images
lowercase : Any =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : Any =np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : Optional[Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ )
lowercase : Dict =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : List[Any] =self.get_inputs()
lowercase : Dict =pipe(**UpperCAmelCase__ ).images
lowercase : List[str] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : Optional[int] =np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : str =0
def callback_fn(UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : torch.FloatTensor ) -> None:
lowercase : str =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase : Optional[int] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase : Any =latents[0, -3:, -3:, -1]
lowercase : int =np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase : Union[str, Any] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase : Dict =latents[0, -3:, -3:, -1]
lowercase : Any =np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase : Union[str, Any] =False
lowercase : Union[str, Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ , torch_dtype=torch.float16 )
lowercase : List[str] =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Optional[int] =self.get_inputs()
pipe(**UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase : Tuple =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ , torch_dtype=torch.float16 )
lowercase : List[str] =pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase : Dict =self.get_inputs()
lowercase : List[Any] =pipe(**UpperCAmelCase__ )
lowercase : List[str] =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[str] =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase : Dict =inputs['''image'''].resize((504, 504) )
lowercase : Any ='''timbrooks/instruct-pix2pix'''
lowercase : str =StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
lowercase : Optional[Any] =pipe(**UpperCAmelCase__ )
lowercase : Dict =output.images[0]
lowercase : Tuple =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase : Optional[Any] =np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 92 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def a__ ( inductance , capacitance ):
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
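# A minimal usage sketch (values are illustrative, not taken from the source):
# a__(0.01, 1e-6) -- a 10 mH inductor with a 1 uF capacitor -- evaluates
# 1 / (2 * pi * sqrt(0.01 * 1e-6)) and reports a resonant frequency of roughly 1591.5 Hz.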
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _lowerCAmelCase ( a ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
lowerCAmelCase__ :Tuple = BertTokenizer.from_pretrained('bert-base-uncased' )
lowerCAmelCase__ :str = bertabert.config.encoder.vocab_size
lowerCAmelCase__ :Tuple = tokenizer.sep_token_id
lowerCAmelCase__ :int = tokenizer.cls_token_id
lowerCAmelCase__ :Any = 1_2_8
lowerCAmelCase__ :List[str] = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
lowerCAmelCase__ :str = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
lowerCAmelCase__ :Optional[Any] = train_dataset.select(range(3_2 ) )
lowerCAmelCase__ :Tuple = val_dataset.select(range(1_6 ) )
lowerCAmelCase__ :str = 4
def _map_to_encoder_decoder_inputs(__UpperCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowerCAmelCase__ :int = tokenizer(batch['article'] , padding='max_length' , truncation=__UpperCAmelCase , max_length=5_1_2 )
lowerCAmelCase__ :int = tokenizer(batch['highlights'] , padding='max_length' , truncation=__UpperCAmelCase , max_length=1_2_8 )
lowerCAmelCase__ :Any = inputs.input_ids
lowerCAmelCase__ :Union[str, Any] = inputs.attention_mask
lowerCAmelCase__ :Dict = outputs.input_ids
lowerCAmelCase__ :Optional[Any] = outputs.input_ids.copy()
lowerCAmelCase__ :List[str] = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
lowerCAmelCase__ :Dict = outputs.attention_mask
assert all(len(__UpperCAmelCase ) == 5_1_2 for x in inputs.input_ids )
assert all(len(__UpperCAmelCase ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(__UpperCAmelCase ):
lowerCAmelCase__ :Optional[Any] = pred.label_ids
lowerCAmelCase__ :List[Any] = pred.predictions
# all unnecessary tokens are removed
lowerCAmelCase__ :Tuple = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ :Any = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ :Dict = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
lowerCAmelCase__ :str = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
lowerCAmelCase__ :List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
lowerCAmelCase__ :str = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ :List[str] = SeqaSeqTrainingArguments(
output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='steps' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowerCAmelCase__ :Any = SeqaSeqTrainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , )
# start training
trainer.train()
| 93 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''vit'''
def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : int=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=16 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = encoder_stride
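# ONNX export settings for the configuration above: the exported graph takes a single
# 4D pixel_values input with dynamic batch, channel, height and width axes, and the
# exported outputs are validated against the original model with an absolute tolerance of 1e-4.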
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 1e-4
| 82 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
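# The helper below accepts a single image, one video given as a list of frames, or a
# batch of videos, and always returns the nested list-of-videos layout that the
# processor expects downstream.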
def lowercase_ ( __A : Tuple ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(__A , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__A , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__A ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''shortest_edge''': 224}
lowercase : str =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowercase : List[str] =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : List[Any] =get_size_dict(UpperCAmelCase , param_name='''crop_size''' )
lowercase : Tuple =do_resize
lowercase : Any =size
lowercase : Optional[Any] =do_center_crop
lowercase : str =crop_size
lowercase : Any =resample
lowercase : List[Any] =do_rescale
lowercase : Dict =rescale_factor
lowercase : List[str] =do_normalize
lowercase : Optional[int] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : List[Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[str] , ) -> np.ndarray:
'''simple docstring'''
lowercase : str =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" in size:
lowercase : int =get_resize_output_image_size(UpperCAmelCase , size['''shortest_edge'''] , default_to_square=UpperCAmelCase )
elif "height" in size and "width" in size:
lowercase : str =(size['''height'''], size['''width'''])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : List[str] =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : int , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Tuple , ) -> List[Any]:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : int , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Dict =to_numpy_array(UpperCAmelCase )
if do_resize:
lowercase : Union[str, Any] =self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase )
if do_center_crop:
lowercase : Optional[Any] =self.center_crop(UpperCAmelCase , size=UpperCAmelCase )
if do_rescale:
lowercase : Dict =self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase )
if do_normalize:
lowercase : int =self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase )
lowercase : Optional[Any] =to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase )
return image
def A__ ( self : List[str] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Optional[int] , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase : Any =do_resize if do_resize is not None else self.do_resize
lowercase : Union[str, Any] =resample if resample is not None else self.resample
lowercase : Tuple =do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : str =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[str] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Optional[Any] =size if size is not None else self.size
lowercase : Any =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowercase : Union[str, Any] =crop_size if crop_size is not None else self.crop_size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase , param_name='''crop_size''' )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : List[str] =make_batched(UpperCAmelCase )
lowercase : Union[str, Any] =[
[
self._preprocess_image(
image=UpperCAmelCase , do_resize=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , do_center_crop=UpperCAmelCase , crop_size=UpperCAmelCase , do_rescale=UpperCAmelCase , rescale_factor=UpperCAmelCase , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , data_format=UpperCAmelCase , )
for img in video
]
for video in videos
]
lowercase : Dict ={'''pixel_values''': videos}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
| 94 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : int=30 , _UpperCAmelCase : Tuple=400 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = size if size is not None else {"height": 20, "width": 20}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_convert_rgb
UpperCAmelCase_ = [512, 1024, 2048, 4096]
UpperCAmelCase_ = patch_size if patch_size is not None else {"height": 16, "width": 16}
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase_ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self )
@property
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ = 2048
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
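        # Each flattened patch holds patch_height * patch_width * num_channels pixel values
        # plus two leading entries for the patch's row and column position, hence the "+ 2".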
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase_ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCAmelCase ):
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
UpperCAmelCase_ = "Hello"
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase_ = 3
@property
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 82 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCamelCase_ :
def __init__( self : Optional[Any] , lowerCAmelCase_ : int , ) -> Tuple:
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : Tuple = 13
UpperCAmelCase_ : int = 7
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : List[Any] = 99
UpperCAmelCase_ : Any = 32
UpperCAmelCase_ : str = 2
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : Dict = 37
UpperCAmelCase_ : Optional[int] = "gelu"
UpperCAmelCase_ : List[str] = 0.1
UpperCAmelCase_ : Tuple = 0.1
UpperCAmelCase_ : Any = 512
UpperCAmelCase_ : List[str] = 16
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : Union[str, Any] = 0.0_2
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Optional[int] = 4
UpperCAmelCase_ : Dict = None
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = None
if self.use_labels:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Dict = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any ) -> str:
UpperCAmelCase_ : Optional[Any] = TFDistilBertModel(config=lowerCAmelCase_ )
UpperCAmelCase_ : Any = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase_ : str = model(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = [input_ids, input_mask]
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : Dict = TFDistilBertForMaskedLM(config=lowerCAmelCase_ )
UpperCAmelCase_ : int = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase_ : List[str] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ) -> Dict:
UpperCAmelCase_ : Optional[Any] = TFDistilBertForQuestionAnswering(config=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
UpperCAmelCase_ : Any = model(lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Tuple = TFDistilBertForSequenceClassification(lowerCAmelCase_ )
UpperCAmelCase_ : int = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_choices
UpperCAmelCase_ : List[Any] = TFDistilBertForMultipleChoice(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : List[Any] = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : Any = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : List[str] = TFDistilBertForTokenClassification(lowerCAmelCase_ )
UpperCAmelCase_ : int = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : str = config_and_inputs
UpperCAmelCase_ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase_ (__A , __A , unittest.TestCase ):
__magic_name__ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__magic_name__ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
UpperCAmelCase_ : Any = TFDistilBertModelTester(self )
UpperCAmelCase_ : List[str] = ConfigTester(self , config_class=lowerCAmelCase_ , dim=37 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
UpperCAmelCase_ : Tuple = TFDistilBertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_tf
class UpperCamelCase_ (unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase_ : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase_ : Dict = model(lowerCAmelCase_ )[0]
UpperCAmelCase_ : Union[str, Any] = [1, 6, 768]
self.assertEqual(output.shape , lowerCAmelCase_ )
UpperCAmelCase_ : Dict = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 )
| 95 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
    UpperCAmelCase_ = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCAmelCase_ = BitConfig(
conv_layer=lowerCAmelCase__ , num_labels=1000 , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , )
return config
def a__ ( lowerCAmelCase__ ):
if "stem.conv" in name:
UpperCAmelCase_ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase_ = name.replace("blocks" , "layers" )
if "head.fc" in name:
UpperCAmelCase_ = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
UpperCAmelCase_ = "bit." + name
if "bit" not in name and "classifier" not in name:
UpperCAmelCase_ = "bit.encoder." + name
return name
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
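# The conversion routine below loads the timm checkpoint, renames its keys into the
# HuggingFace BiT layout, rebuilds the timm preprocessing as a BitImageProcessor, and
# verifies that pixel values and logits match before optionally saving or pushing the result.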
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ = get_config(lowerCAmelCase__ )
# load original model from timm
UpperCAmelCase_ = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
# load state_dict of original model
UpperCAmelCase_ = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val.squeeze() if "head" in key else val
# load HuggingFace model
UpperCAmelCase_ = BitForImageClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
UpperCAmelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(lowerCAmelCase__ ).unsqueeze(0 )
UpperCAmelCase_ = processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(lowerCAmelCase__ )
UpperCAmelCase_ = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCAmelCase_ = timm_model(lowerCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowerCamelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 | 0 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__lowerCamelCase = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__lowerCamelCase = [0, 25, 50]
__lowerCamelCase = [25, 50, 75]
__lowerCamelCase = fuzz.membership.trimf(X, abca)
__lowerCamelCase = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__lowerCamelCase = np.ones(75)
__lowerCamelCase = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__lowerCamelCase = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__lowerCamelCase = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
__lowerCamelCase = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__lowerCamelCase = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__lowerCamelCase = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__lowerCamelCase = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__lowerCamelCase = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
__lowerCamelCase = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
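    # The two composition steps named above are left unimplemented in this script; a
    # minimal NumPy sketch (an assumption, not part of the original) would build a
    # min-relation between the two sets and compose it with itself:
    #   R = np.fmin.outer(young, middle_aged)
    #   max_min = np.max(np.minimum(R[:, :, None], R[None, :, :]), axis=1)
    #   max_product = np.max(R[:, :, None] * R[None, :, :], axis=1)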
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 96 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def a__ ( vl , wt , w , n ):
    """
    Fractional knapsack: take items (allowing fractions) in order of best
    value-to-weight ratio until capacity w is filled, and return the maximum value.
    >>> a__([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # sort items by value-to-weight ratio, best first
    r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Optional[int] = 'xglm'
a :int = ['past_key_values']
a :Union[str, Any] = {
'num_attention_heads': 'attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'num_layers',
}
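    # attribute_map lets generic code read hidden_size, num_hidden_layers and
    # num_attention_heads even though XGLM stores them as d_model, num_layers and
    # attention_heads respectively.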
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2_5_6_0_0_8 , SCREAMING_SNAKE_CASE_ : List[str]=2_0_4_8 , SCREAMING_SNAKE_CASE_ : List[str]=1_0_2_4 , SCREAMING_SNAKE_CASE_ : str=4_0_9_6 , SCREAMING_SNAKE_CASE_ : int=2_4 , SCREAMING_SNAKE_CASE_ : Tuple=1_6 , SCREAMING_SNAKE_CASE_ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : Any=0.0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE_ : Any=2 , **SCREAMING_SNAKE_CASE_ : str , ) -> str:
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
lowercase_ = d_model
lowercase_ = ffn_dim
lowercase_ = num_layers
lowercase_ = attention_heads
lowercase_ = activation_function
lowercase_ = dropout
lowercase_ = attention_dropout
lowercase_ = activation_dropout
lowercase_ = layerdrop
lowercase_ = init_std
lowercase_ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ = use_cache
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
| 97 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCamelCase = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
lowerCamelCase = None
def a__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=lowerCAmelCase__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=lowerCAmelCase__ , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = bool(qa["answers"]["text"] )
return qid_to_has_ans
def a__ ( lowerCAmelCase__ ):
def remove_articles(lowerCAmelCase__ ):
return ARTICLES_REGEX.sub(" " , lowerCAmelCase__ )
def white_space_fix(lowerCAmelCase__ ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase__ ):
UpperCAmelCase_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase__ ) ) ) )
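# For example, the normalizer above maps "The  Cat!" to "cat": lower-cased, punctuation
# stripped, articles removed and whitespace collapsed.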
def a__ ( lowerCAmelCase__ ):
if not s:
return []
return normalize_answer(lowerCAmelCase__ ).split()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return int(normalize_answer(lowerCAmelCase__ ) == normalize_answer(lowerCAmelCase__ ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = collections.Counter(lowerCAmelCase__ ) & collections.Counter(lowerCAmelCase__ )
UpperCAmelCase_ = sum(common.values() )
if len(lowerCAmelCase__ ) == 0 or len(lowerCAmelCase__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = (2 * precision * recall) / (precision + recall)
return fa
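# Worked example: gold tokens {"cat", "sat", "mat"} against predicted tokens {"cat", "sat"}
# give precision 1.0 and recall 2/3, so F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.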
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = qa["id"]
                UpperCAmelCase_ = [t for t in qa["answers"]["text"] if normalize_answer(t )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
UpperCAmelCase_ = [""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
UpperCAmelCase_ = preds[qid]
# Take max over all gold answers
UpperCAmelCase_ = max(compute_exact(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
UpperCAmelCase_ = max(compute_fa(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
return exact_scores, fa_scores
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = {}
for qid, s in scores.items():
UpperCAmelCase_ = na_probs[qid] > na_prob_thresh
if pred_na:
UpperCAmelCase_ = float(not qid_to_has_ans[qid] )
else:
UpperCAmelCase_ = s
return new_scores
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
if not qid_list:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
UpperCAmelCase_ = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
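# Usage note added for clarity (not part of the official script): a hypothetical invocation on
# SQuAD v2.0 dev data and a model's prediction / no-answer probability files would look like:
#
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_prob.json --na-prob-thresh 0.5 -o eval.json -p pr_curves
#
# The script and file names above are placeholders; only the flags come from the argparse setup.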
| 82 | 0 |
"""Tokenization class for MGP-STR: a character-level tokenizer backed by a vocab.json file."""
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # MGP-STR tokenizes at the character level: every character becomes its own token.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
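# Minimal usage sketch added for illustration (the toy vocab below is made up; the real vocab.json
# ships with the MGP-STR checkpoints on the Hub, and behaviour assumes the transformers version
# this module was written against). It shows that tokenization is purely character level.
if __name__ == "__main__":
    import tempfile

    toy_vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}
    with tempfile.TemporaryDirectory() as tmp_dir:
        vocab_path = os.path.join(tmp_dir, "vocab.json")
        with open(vocab_path, "w", encoding="utf-8") as f:
            json.dump(toy_vocab, f)
        tokenizer = MgpstrTokenizer(vocab_file=vocab_path)
        tokens = tokenizer.tokenize("cab")
        print(tokens)  # ['c', 'a', 'b']
        print(tokenizer.convert_tokens_to_ids(tokens))  # [4, 2, 3]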
| 98 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float(moles / volume ) * nfactor )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
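# Illustrative check (example numbers chosen here, not taken from the original file):
# 1 mol of an ideal gas at 300 K in a 10 L vessel exerts P = nRT / V = (1 * 0.0821 * 300) / 10,
# roughly 2.46 atm, which the helper rounds to the nearest integer.
#   print(moles_to_pressure(volume=10, moles=1, temperature=300))  # 2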
| 82 | 0 |
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
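# Quick illustration (example input chosen here, not from the original file): the best contiguous
# product in [2, 3, -2, 4] comes from the prefix [2, 3].
if __name__ == "__main__":
    print(max_product_subarray([2, 3, -2, 4]))  # 6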
| 99 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
lowerCamelCase = 6_378_137.0
lowerCamelCase = 6_356_752.314_245
lowerCamelCase = 6_378_137
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
UpperCAmelCase_ = haversine_distance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
UpperCAmelCase_ = (b_lata + b_lata) / 2
UpperCAmelCase_ = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
UpperCAmelCase_ = (sin(lowerCAmelCase__ ) ** 2) * (cos(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = cos(sigma / 2 ) ** 2
UpperCAmelCase_ = (sigma - sin(lowerCAmelCase__ )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
UpperCAmelCase_ = (cos(lowerCAmelCase__ ) ** 2) * (sin(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = sin(sigma / 2 ) ** 2
UpperCAmelCase_ = (sigma + sin(lowerCAmelCase__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
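# Illustrative call (coordinates are example values added here, not from the original module):
# distance in metres between two (latitude, longitude) points given in degrees, roughly
# San Francisco to New York City:
#   lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)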
| 82 | 0 |
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
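# Note added for clarity: because _elements maps the END marker to a single space, every
# completion carries a trailing space, so with the word list above autocomplete_using_trie("de")
# prints ('depart ', 'detergent ', 'deer ', 'deal ').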
| 100 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = 300
return config
def lowercase__ ( self : int ) -> List[Any]:
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase__ ( self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = MraModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> int:
'''simple docstring'''
UpperCAmelCase_ = True
UpperCAmelCase_ = MraModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MraForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = MraForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def lowercase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = MraModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip(reason="MRA does not output attentions" )
def lowercase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
@slow
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
UpperCAmelCase_ = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )[0]
UpperCAmelCase_ = 50265
UpperCAmelCase_ = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
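# Usage note added for clarity: inside a transformers checkout this suite is typically run with
# pytest, and the @slow-decorated integration tests above only execute when RUN_SLOW is set, e.g.
# (file path assumed):
#   RUN_SLOW=1 python -m pytest tests/models/mra/test_modeling_mra.py -q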
| 82 | 0 |