code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase_ : Optional[int] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase = val
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
UpperCAmelCase = value
else:
UpperCAmelCase = value
return new_state_dict
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = """"""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[:256, :]
UpperCAmelCase = in_proj_bias[:256]
UpperCAmelCase = in_proj_weight[256:512, :]
UpperCAmelCase = in_proj_bias[256:512]
UpperCAmelCase = in_proj_weight[-256:, :]
UpperCAmelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCAmelCase = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[:256, :]
UpperCAmelCase = in_proj_bias[:256]
UpperCAmelCase = in_proj_weight[256:512, :]
UpperCAmelCase = in_proj_bias[256:512]
UpperCAmelCase = in_proj_weight[-256:, :]
UpperCAmelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCAmelCase = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCAmelCase = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCAmelCase = in_proj_weight_cross_attn[:256, :]
UpperCAmelCase = in_proj_bias_cross_attn[:256]
UpperCAmelCase = in_proj_weight_cross_attn[256:512, :]
UpperCAmelCase = in_proj_bias_cross_attn[256:512]
UpperCAmelCase = in_proj_weight_cross_attn[-256:, :]
UpperCAmelCase = in_proj_bias_cross_attn[-256:]
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = image.size
UpperCAmelCase = max(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = 800 if """detection""" in checkpoint_url else 1000
UpperCAmelCase = target_max_size / current_max_size
UpperCAmelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = F.to_tensor(lowerCAmelCase__ )
UpperCAmelCase = F.normalize(lowerCAmelCase__ , mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] )
return image
@torch.no_grad()
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
logger.info("""Converting model...""" )
# load original state dict
UpperCAmelCase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" )
# rename keys
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = rename_backbone_keys(lowerCAmelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCAmelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase = """model."""
for key in state_dict.copy().keys():
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
UpperCAmelCase = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase = val
# create HuggingFace model and load state dict
UpperCAmelCase = TableTransformerConfig(
backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCAmelCase = 15
UpperCAmelCase = 2
UpperCAmelCase = {0: """table""", 1: """table rotated"""}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
else:
UpperCAmelCase = 125
UpperCAmelCase = 6
UpperCAmelCase = {
0: """table""",
1: """table column""",
2: """table row""",
3: """table column header""",
4: """table projected row header""",
5: """table spanning cell""",
}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
UpperCAmelCase = DetrImageProcessor(
format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 )
UpperCAmelCase = TableTransformerForObjectDetection(lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
# verify our conversion
UpperCAmelCase = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png"""
UpperCAmelCase = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , filename=lowerCAmelCase__ )
UpperCAmelCase = Image.open(lowerCAmelCase__ ).convert("""RGB""" )
UpperCAmelCase = normalize(resize(lowerCAmelCase__ , lowerCAmelCase__ ) ).unsqueeze(0 )
UpperCAmelCase = model(lowerCAmelCase__ )
if "detection" in checkpoint_url:
UpperCAmelCase = (1, 15, 3)
UpperCAmelCase = torch.tensor(
[[-6.78_97, -16.99_85, 6.79_37], [-8.01_86, -22.21_92, 6.96_77], [-7.31_17, -21.07_08, 7.40_55]] )
UpperCAmelCase = torch.tensor([[0.48_67, 0.17_67, 0.67_32], [0.67_18, 0.44_79, 0.38_30], [0.47_16, 0.17_60, 0.63_64]] )
else:
UpperCAmelCase = (1, 125, 7)
UpperCAmelCase = torch.tensor(
[[-18.14_30, -8.32_14, 4.82_74], [-18.46_85, -7.13_61, -4.26_67], [-26.36_93, -9.34_29, -4.99_62]] )
UpperCAmelCase = torch.tensor([[0.49_83, 0.55_95, 0.94_40], [0.49_16, 0.63_15, 0.59_54], [0.61_08, 0.86_37, 0.11_35]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
# Push model to HF hub
logger.info("""Pushing model to the hub...""" )
UpperCAmelCase = (
"""microsoft/table-transformer-detection"""
if """detection""" in checkpoint_url
else """microsoft/table-transformer-structure-recognition"""
)
model.push_to_hub(lowerCAmelCase__ )
image_processor.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCAmelCase_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 673 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = int(number**0.5 )
return number == sq * sq
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCAmelCase_ = x_den * y_den * z_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
top //= hcf
bottom //= hcf
return top, bottom
def a__ ( lowerCAmelCase__ = 35 ):
UpperCAmelCase_ = set()
UpperCAmelCase_ = 42
UpperCAmelCase_ = Fraction(0 )
UpperCAmelCase_ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCAmelCase_ = x_num * y_den + x_den * y_num
UpperCAmelCase_ = x_den * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCAmelCase_ = x_den * x_den * y_den * y_den
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=-1
UpperCAmelCase_ = x_num * y_num
UpperCAmelCase_ = x_den * y_num + x_num * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = x_num * x_num * y_num * y_num
UpperCAmelCase_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
for num, den in unique_s:
total += Fraction(lowerCAmelCase__ , lowerCAmelCase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 82 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__snake_case = logging.get_logger(__name__)
def _A ( SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Dict=None ):
return field(default_factory=lambda: default , metadata=lowerCAmelCase__ )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ : Dict =list_field(
default=[], metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
}, )
UpperCamelCase_ : Any =list_field(
default=[8], metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
UpperCamelCase_ : Dict =list_field(
default=[8, 32, 128, 512], metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'}, )
UpperCamelCase_ : Dict =field(
default=lowercase, metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'}, )
UpperCamelCase_ : List[Any] =field(
default=lowercase, metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'}, )
UpperCamelCase_ : List[str] =field(
default=lowercase, metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
UpperCamelCase_ : str =field(default=lowercase, metadata={'help': 'Use FP16 to accelerate inference.'} )
UpperCamelCase_ : List[str] =field(default=lowercase, metadata={'help': 'Benchmark training of model'} )
UpperCamelCase_ : Any =field(default=lowercase, metadata={'help': 'Verbose memory tracing'} )
UpperCamelCase_ : Tuple =field(
default=lowercase, metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'}, )
UpperCamelCase_ : Optional[int] =field(
default=lowercase, metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
}, )
UpperCamelCase_ : Union[str, Any] =field(default=lowercase, metadata={'help': 'Trace memory line by line'} )
UpperCamelCase_ : str =field(default=lowercase, metadata={'help': 'Save result to a CSV file'} )
UpperCamelCase_ : Union[str, Any] =field(default=lowercase, metadata={'help': 'Save all print statements in a log file'} )
UpperCamelCase_ : int =field(default=lowercase, metadata={'help': 'Whether to print environment information'} )
UpperCamelCase_ : Any =field(
default=lowercase, metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
}, )
UpperCamelCase_ : List[str] =field(
default=f'''inference_time_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving time results to csv.'}, )
UpperCamelCase_ : List[str] =field(
default=f'''inference_memory_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving memory results to csv.'}, )
UpperCamelCase_ : str =field(
default=f'''train_time_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving time results to csv for training.'}, )
UpperCamelCase_ : Optional[Any] =field(
default=f'''train_memory_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving memory results to csv for training.'}, )
UpperCamelCase_ : int =field(
default=f'''env_info_{round(time() )}.csv''', metadata={'help': 'CSV filename used if saving environment information.'}, )
UpperCamelCase_ : Any =field(
default=f'''log_{round(time() )}.csv''', metadata={'help': 'Log filename used if print statements are saved in log.'}, )
UpperCamelCase_ : int =field(default=3, metadata={'help': 'Times an experiment will be run.'} )
UpperCamelCase_ : List[str] =field(
default=lowercase, metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
}, )
def UpperCAmelCase ( self ) -> Optional[Any]:
warnings.warn(
F'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , _UpperCAmelCase , )
def UpperCAmelCase ( self ) -> Optional[Any]:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCAmelCase ( self ) -> List[str]:
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def UpperCAmelCase ( self ) -> List[str]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 658 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: List[str] ) -> Dict:
"""simple docstring"""
__a = [1]
__a , __a , __a = 0, 0, 0
__a = ugly_nums[ia] * 2
__a = ugly_nums[ia] * 3
__a = ugly_nums[ia] * 5
for _ in range(1, lowerCAmelCase__ ):
__a = min(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ )
ugly_nums.append(lowerCAmelCase__ )
if next_num == next_a:
ia += 1
__a = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__a = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__a = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(200) = }""") | 448 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''vit'''
def __init__( self : List[str] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Dict=12 , _UpperCAmelCase : int=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : int=1e-12 , _UpperCAmelCase : List[str]=224 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=16 , **_UpperCAmelCase : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = encoder_stride
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = version.parse('''1.11''' )
@property
def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self : Union[str, Any] ) -> float:
'''simple docstring'''
return 1e-4
| 82 | 0 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ):
UpperCAmelCase__ : Optional[int] = VQModel
UpperCAmelCase__ : Tuple = "sample"
@property
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=(32, 32) ) -> Union[str, Any]:
UpperCamelCase : Union[str, Any] = 4
UpperCamelCase : Union[str, Any] = 3
UpperCamelCase : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase )
return {"sample": image}
@property
def snake_case_ ( self ) -> List[Any]:
return (3, 32, 32)
@property
def snake_case_ ( self ) -> Dict:
return (3, 32, 32)
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Union[str, Any] = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 3,
}
UpperCamelCase : Dict = self.dummy_input
return init_dict, inputs_dict
def snake_case_ ( self ) -> List[str]:
pass
def snake_case_ ( self ) -> Optional[int]:
pass
def snake_case_ ( self ) -> Any:
UpperCamelCase , UpperCamelCase : Tuple = VQModel.from_pretrained('fusing/vqgan-dummy', output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ), 0 )
model.to(_UpperCAmelCase )
UpperCamelCase : Any = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : str = VQModel.from_pretrained('fusing/vqgan-dummy' )
model.to(_UpperCAmelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
UpperCamelCase : List[str] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size )
UpperCamelCase : Union[str, Any] = image.to(_UpperCAmelCase )
with torch.no_grad():
UpperCamelCase : Dict = model(_UpperCAmelCase ).sample
UpperCamelCase : Any = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCamelCase : Dict = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(_UpperCAmelCase, _UpperCAmelCase, atol=1e-3 ) )
| 40 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : int=30 , _UpperCAmelCase : Tuple=400 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : int=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = size if size is not None else {"height": 20, "width": 20}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_convert_rgb
UpperCAmelCase_ = [512, 1024, 2048, 4096]
UpperCAmelCase_ = patch_size if patch_size is not None else {"height": 16, "width": 16}
def lowercase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase_ = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = PixaStructImageProcessingTester(self )
@property
def lowercase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ = 2048
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase_ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCAmelCase ):
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
UpperCAmelCase_ = "Hello"
UpperCAmelCase_ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase_ = image_processor(
_UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : str ) -> Tuple:
    """Checks `flattened_patches` shapes for numpy-array inputs, batched and unbatched.

    NOTE(review): identifiers were mechanically renamed; `UpperCAmelCase_` /
    `_UpperCAmelCase` each stand for distinct original locals -- restore before running.
    """
    UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
    # create random numpy tensors
    UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
    for image in image_inputs:
        self.assertIsInstance(_UpperCAmelCase , np.ndarray )
    # Hidden dim = patch_h * patch_w * channels, plus 2 coordinate slots.
    UpperCAmelCase_ = (
        (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
        * self.image_processor_tester.num_channels
    ) + 2
    for max_patch in self.image_processor_tester.max_patches:
        # Test not batched input
        UpperCAmelCase_ = image_processor(
            image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
        self.assertEqual(
            encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
        # Test batched
        UpperCAmelCase_ = image_processor(
            _UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
        self.assertEqual(
            encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
    """Checks `flattened_patches` shapes for PyTorch-tensor inputs, batched and unbatched.

    NOTE(review): identifiers were mechanically renamed; `UpperCAmelCase_` /
    `_UpperCAmelCase` each stand for distinct original locals -- restore before running.
    """
    UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
    for image in image_inputs:
        self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
    # Test not batched input
    # Hidden dim = patch_h * patch_w * channels, plus 2 coordinate slots.
    UpperCAmelCase_ = (
        (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
        * self.image_processor_tester.num_channels
    ) + 2
    for max_patch in self.image_processor_tester.max_patches:
        # Test not batched input
        UpperCAmelCase_ = image_processor(
            image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
        self.assertEqual(
            encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
        # Test batched
        UpperCAmelCase_ = image_processor(
            _UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
        self.assertEqual(
            encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Image-processor tests re-run with 4-channel (RGBA) inputs.

    NOTE(review): identifiers in this file were mechanically renamed;
    `UpperCAmelCase_` / `_UpperCAmelCase` each stand for distinct original locals.
    """

    # Processor under test; None (suite skipped) when vision deps are unavailable.
    UpperCamelCase = PixaStructImageProcessor if is_vision_available() else None

    def lowercase__ ( self : List[str] ) -> Optional[Any]:
        """Set up a tester producing 4-channel images; expected output channels is 3."""
        UpperCAmelCase_ = PixaStructImageProcessingTester(self , num_channels=4 )
        UpperCAmelCase_ = 3

    @property
    def lowercase__ ( self : str ) -> Optional[int]:
        """Keyword arguments used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowercase__ ( self : str ) -> Optional[int]:
        """The processor exposes its configuration attributes."""
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "do_convert_rgb" ) )

    def lowercase__ ( self : List[str] ) -> Tuple:
        """4-channel PIL inputs are RGB-converted, so the hidden dim uses channels - 1."""
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , Image.Image )
        # Test not batched input
        # (num_channels - 1): the alpha channel is dropped by the RGB conversion.
        UpperCAmelCase_ = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCAmelCase_ = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            UpperCAmelCase_ = image_processor(
                _UpperCAmelCase , return_tensors="pt" , max_patches=_UpperCAmelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 82 | 0 |
"""Deprecated shim: `memory_utils` moved to `utils.memory`; importing it only warns."""
import warnings

# Fix: the warning previously told users to import `find_executable_batchsize`
# (missing underscore), inconsistent with the actual symbol named on the next line.
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
    '`from accelerate import find_executable_batch_size` to avoid this warning.',
    FutureWarning,
)
| 649 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
    """Build a `BitConfig` for the given timm model name.

    Downloads the ImageNet-1k id->label mapping from the HF Hub and wires it,
    together with its inverse, into the config.

    Args:
        lowerCAmelCase__: timm model name (e.g. "resnetv2_50x1_bitm").
    Returns:
        A fully populated `BitConfig` with 1000 labels.
    """
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    # hf_hub_download takes (repo_id, filename) -- the obfuscated original passed
    # the model name for both, which could never resolve.
    with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
        idalabel = json.load(f)
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    # BiT checkpoints use weight-standardized convolutions; plain ResNetv2 ones do not.
    conv_layer = "std_conv" if "bit" in lowerCAmelCase__ else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=idalabel, label2id=labelaid,
    )
    return config
def a__ ( lowerCAmelCase__ ):
    """Map a timm BiT state-dict key to its HuggingFace `BitForImageClassification` name.

    Args:
        lowerCAmelCase__: parameter name from the original timm checkpoint.
    Returns:
        The corresponding key in the HuggingFace model's state dict.
    """
    # The obfuscated original read an undefined `name`; bind it to the parameter.
    name = lowerCAmelCase__
    # Stem convolution becomes the embedder.
    if "stem.conv" in name:
        name = name.replace("stem.conv" , "bit.embedder.convolution" )
    # timm calls the stage children "blocks"; HF calls them "layers".
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    # Classification head.
    if "head.fc" in name:
        name = name.replace("head.fc" , "classifier.1" )
    # Final norm lives directly under the `bit` backbone.
    if name.startswith("norm" ):
        name = "bit." + name
    # Everything else belongs to the encoder, unless already scoped.
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def a__ ( ):
    """Download and return the standard COCO test image ("two cats") used to sanity-check vision models."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can decode straight from the raw HTTP response
    # (the obfuscated original referenced an undefined name for both arguments).
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
    """Convert a timm BiT checkpoint into a HuggingFace `BitForImageClassification`.

    NOTE(review): the signature repeats the same parameter name three times -- a
    SyntaxError. Originally (model_name, pytorch_dump_folder_path, push_to_hub=False);
    every `lowerCAmelCase__` use and `UpperCAmelCase_` assignment below likewise
    stands for a distinct original local -- restore before running.
    Verifies pixel values and logits against the timm model before saving/pushing.
    """
    UpperCAmelCase_ = get_config(lowerCAmelCase__ )
    # load original model from timm
    UpperCAmelCase_ = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
    timm_model.eval()
    # load state_dict of original model
    UpperCAmelCase_ = timm_model.state_dict()
    # Head weights are stored with trailing singleton dims in timm; squeeze them.
    for key in state_dict.copy().keys():
        UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ )
        UpperCAmelCase_ = val.squeeze() if "head" in key else val
    # load HuggingFace model
    UpperCAmelCase_ = BitForImageClassification(lowerCAmelCase__ )
    model.eval()
    model.load_state_dict(lowerCAmelCase__ )
    # create image processor mirroring the timm data pipeline (resize/crop/normalize)
    UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) )
    UpperCAmelCase_ = transform.transforms
    UpperCAmelCase_ = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    UpperCAmelCase_ = BitImageProcessor(
        do_resize=lowerCAmelCase__ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    UpperCAmelCase_ = prepare_img()
    UpperCAmelCase_ = transform(lowerCAmelCase__ ).unsqueeze(0 )
    UpperCAmelCase_ = processor(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
    # verify logits
    with torch.no_grad():
        UpperCAmelCase_ = model(lowerCAmelCase__ )
    UpperCAmelCase_ = outputs.logits
    print("Logits:" , logits[0, :3] )
    print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
    UpperCAmelCase_ = timm_model(lowerCAmelCase__ )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
        print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
        model.save_pretrained(lowerCAmelCase__ )
        processor.save_pretrained(lowerCAmelCase__ )
    if push_to_hub:
        print(f"""Pushing model {model_name} and processor to the hub""" )
        model.push_to_hub(f"""ybelkada/{model_name}""" )
        processor.push_to_hub(f"""ybelkada/{model_name}""" )
# CLI entry point: parse arguments and run the checkpoint conversion.
if __name__ == "__main__":
    lowerCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""resnetv2_50x1_bitm""",
        type=str,
        help="""Name of the BiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model to the hub.""",
    )
    # NOTE(review): `parser`, `args` and `convert_bit_checkpoint` are the
    # pre-obfuscation names; the assignments above were renamed to `lowerCamelCase`.
    lowerCamelCase = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case (unittest.TestCase ):
    """Fast CPU test for the ScoreSdeVe (variance-exploding SDE) pipeline.

    NOTE(review): identifiers were mechanically renamed; each `_lowerCAmelCase`
    assignment and `_UpperCAmelCase` argument stands for a distinct original local.
    """

    @property
    def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
        """A tiny, seeded `UNet2DModel` used as the pipeline's denoiser."""
        torch.manual_seed(0 )
        _lowerCAmelCase : Union[str, Any] = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        # NOTE(review): `model` is the pre-obfuscation name for the value assigned above.
        return model

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
        """Two inference steps reproduce a known 3x3 slice, with and without return_dict."""
        _lowerCAmelCase : Any = self.dummy_uncond_unet
        _lowerCAmelCase : Optional[int] = ScoreSdeVeScheduler()
        _lowerCAmelCase : Optional[Any] = ScoreSdeVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
        sde_ve.to(_UpperCAmelCase )
        sde_ve.set_progress_bar_config(disable=_UpperCAmelCase )
        # Fixed seeds so the two invocations are comparable.
        _lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
        _lowerCAmelCase : Optional[int] = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_UpperCAmelCase ).images
        _lowerCAmelCase : int = torch.manual_seed(0 )
        _lowerCAmelCase : Tuple = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_UpperCAmelCase , return_dict=_UpperCAmelCase )[
            0
        ]
        _lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        _lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _lowerCAmelCase : List[str] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __snake_case (unittest.TestCase ):
    """Slow integration test against the pretrained google/ncsnpp-church-256 weights.

    NOTE(review): identifiers were mechanically renamed; each `_lowerCAmelCase`
    assignment and `_UpperCAmelCase` argument stands for a distinct original local.
    """

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
        """Ten inference steps at 256x256 reproduce a known 3x3 output slice."""
        _lowerCAmelCase : Dict = """google/ncsnpp-church-256"""
        _lowerCAmelCase : int = UNetaDModel.from_pretrained(_UpperCAmelCase )
        _lowerCAmelCase : Union[str, Any] = ScoreSdeVeScheduler.from_pretrained(_UpperCAmelCase )
        _lowerCAmelCase : Union[str, Any] = ScoreSdeVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
        sde_ve.to(_UpperCAmelCase )
        sde_ve.set_progress_bar_config(disable=_UpperCAmelCase )
        # Fixed seed for a deterministic sample.
        _lowerCAmelCase : Dict = torch.manual_seed(0 )
        _lowerCAmelCase : str = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=_UpperCAmelCase ).images
        _lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        _lowerCAmelCase : List[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 429 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def a__ ( vl , wt , w , n ):
    """Solve the fractional knapsack problem greedily.

    Args:
        vl: item values.
        wt: item weights (same length as `vl`).
        w: knapsack capacity.
        n: number of items.
    Returns:
        Maximum total value attainable when items may be taken fractionally.
        (The obfuscated original repeated one parameter name four times, a
        SyntaxError; parameters restored.)

    >>> a__([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # Sort items by value/weight ratio, best first (greedy order).
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    # Prefix sums of weights: acc[k-1] is the total weight of the k best items.
    acc = list(accumulate(wt))
    # k = number of whole items that fit within capacity w.
    k = bisect(acc, w)
    if k == 0:
        return 0
    if k == n:
        # Everything fits: take all items whole.
        return sum(vl[:k])
    # Take the k best items whole, plus a fraction of the (k+1)-th.
    return sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if __name__ == "__main__":
    import doctest

    # Run any doctest examples embedded in this module.
    doctest.testmod()
| 82 | 0 |
import fire
from utils import calculate_rouge, save_json
def __lowerCamelCase ( pred_path , tgt_path , save_path=None , **kwargs ):
    """Score hypothesis lines in `pred_path` against reference lines in `tgt_path` with ROUGE.

    Extra keyword arguments are forwarded to `calculate_rouge`. If `save_path` is
    given, the metrics dict is additionally written there as JSON.
    (The obfuscated original repeated one parameter name, a SyntaxError; parameters
    restored, and the file handles are now closed via context managers.)
    """
    with open(pred_path) as f:
        pred_lns = [x.strip() for x in f.readlines()]
    with open(tgt_path) as f:
        # Truncate references so both lists have the same length.
        tgt_lns = [x.strip() for x in f.readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
    # Expose the ROUGE scorer as a CLI via python-fire.
    # NOTE(review): `calculate_rouge_path` is the pre-obfuscation name of the
    # function defined above as `__lowerCamelCase`.
    fire.Fire(calculate_rouge_path)
| 496 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Regex matching English articles as standalone words (used by answer normalization).
# NOTE(review): obfuscation collapsed two distinct globals into one name -- later
# code refers to them as ARTICLES_REGEX and OPTS respectively.
lowerCamelCase = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
# Parsed CLI options, populated in the __main__ block at the bottom of the script.
lowerCamelCase = None
def a__ ( ):
    """Parse the SQuAD 2.0 evaluation script's command-line options.

    Prints help and exits with status 1 when invoked without arguments.
    NOTE(review): the value assigned to `UpperCAmelCase_` is referenced below as
    `parser` -- an obfuscation artifact.
    """
    UpperCAmelCase_ = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=lowerCAmelCase__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=lowerCAmelCase__ , help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose" , "-v" , action="store_true" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def a__ ( lowerCAmelCase__ ):
    """Map every question id in a SQuAD-style dataset to whether it has a gold answer.

    Args:
        lowerCAmelCase__: the `data` list of a SQuAD JSON file.
    Returns:
        dict of question id -> bool (unanswerable questions have an empty `text` list).
    """
    qid_to_has_ans = {}
    for article in lowerCAmelCase__:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                # The obfuscated original discarded the key; restore the dict write.
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def a__ ( lowerCAmelCase__ ):
    """Normalize a SQuAD answer: lowercase, strip punctuation/articles/extra whitespace.

    Relies on the module-level ARTICLES_REGEX. (The obfuscated inner helpers
    ignored their parameters and read an undefined `text`; restored.)
    """
    def remove_articles(text):
        # Standalone "a", "an", "the" are dropped.
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase__))))
def a__ ( lowerCAmelCase__ ):
    """Whitespace-tokenize a string after SQuAD normalization; falsy input gives [].

    (The obfuscated original tested an undefined `s`; restored to the parameter.)
    """
    if not lowerCAmelCase__:
        return []
    return normalize_answer(lowerCAmelCase__).split()
def a__ ( a_gold , a_pred ):
    """Exact-match score: 1 if the normalized answers are identical, else 0.

    (The obfuscated original repeated one parameter name, a SyntaxError; restored.)
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def a__ ( a_gold , a_pred ):
    """Token-level F1 between a gold answer and a prediction (SQuAD metric).

    (The obfuscated original repeated one parameter name, a SyntaxError, and
    collapsed all locals; restored.)
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    # Multiset intersection counts shared tokens with multiplicity.
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def a__ ( dataset , preds ):
    """Compute per-question exact-match and F1 scores over a SQuAD dataset.

    Args:
        dataset: the `data` list of a SQuAD JSON file.
        preds: dict of question id -> predicted answer string.
    Returns:
        (exact_scores, fa_scores): dicts mapping question id to the best score
        over all gold answers.
    (Obfuscated original had duplicate parameter names, a SyntaxError; restored.)
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"""Missing prediction for {qid}""")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def a__ ( scores , na_probs , qid_to_has_ans , na_prob_thresh ):
    """Apply a no-answer probability threshold to raw per-question scores.

    When the model effectively predicts "no answer" (na_prob > threshold) the
    question scores 1.0 only if it truly has no answer; otherwise the raw score
    is kept. (Obfuscated original had duplicate parameter names, a SyntaxError;
    restored.)
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def a__ ( exact_scores , fa_scores , qid_list=None ):
    """Aggregate per-question scores into percentage EM/F1 plus a question count.

    If `qid_list` is given, only those question ids contribute.
    (Obfuscated original had duplicate parameter names, a SyntaxError; restored.)
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def a__ ( main_eval , new_eval , prefix ):
    """Copy every entry of `new_eval` into `main_eval` under a `{prefix}_` namespace.

    (The obfuscated original discarded the destination key and had duplicate
    parameter names; restored.)
    """
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def a__ ( precisions , recalls , out_image , title ):
    """Render a precision-recall step curve and save it to `out_image`.

    NOTE(review): `plt` is imported at the bottom of this script only when
    --out-image-dir is set; this function must not be called otherwise.
    (Obfuscated original had duplicate parameter names, a SyntaxError; restored.)
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def a__ ( scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None ):
    """Compute average precision (x100) by sweeping the no-answer threshold.

    Questions are visited in order of increasing no-answer probability; each
    distinct probability value contributes one (precision, recall) point to the
    curve. Optionally plots the curve to `out_image`.
    (Obfuscated original had duplicate parameter names, a SyntaxError; restored.)
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    """Produce PR curves (exact, F1, oracle) and merge their APs into the main eval.

    NOTE(review): the signature repeats one parameter name six times -- a
    SyntaxError. Originally (main_eval, exact_raw, f1_raw, na_probs,
    qid_to_has_ans, out_image_dir); every `lowerCAmelCase__` use below likewise
    stands for a distinct original local -- restore before running.
    """
    if out_image_dir and not os.path.exists(lowerCAmelCase__ ):
        os.makedirs(lowerCAmelCase__ )
    UpperCAmelCase_ = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        # No answerable questions: a PR analysis would divide by zero.
        return
    UpperCAmelCase_ = make_precision_recall_eval(
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
    UpperCAmelCase_ = make_precision_recall_eval(
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
    # Oracle: score each question 1.0 iff it has an answer.
    UpperCAmelCase_ = {k: float(lowerCAmelCase__ ) for k, v in qid_to_has_ans.items()}
    UpperCAmelCase_ = make_precision_recall_eval(
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
    merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_exact" )
    merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_f1" )
    merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "pr_oracle" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
    """Save a histogram of no-answer probabilities for a subset of questions.

    NOTE(review): the signature repeats one parameter name four times -- a
    SyntaxError. Originally (na_probs, qid_list, image_dir, name); the
    `lowerCAmelCase__` uses below likewise stand for distinct original locals.
    Requires `plt`/`np`, which are only imported when --out-image-dir is set.
    """
    if not qid_list:
        return
    UpperCAmelCase_ = [na_probs[k] for k in qid_list]
    # Weight each sample by 1/N so the bars show dataset proportions.
    UpperCAmelCase_ = np.ones_like(lowerCAmelCase__ ) / float(len(lowerCAmelCase__ ) )
    plt.hist(lowerCAmelCase__ , weights=lowerCAmelCase__ , bins=20 , range=(0.0, 1.0) )
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(f"""Histogram of no-answer probability: {name}""" )
    plt.savefig(os.path.join(lowerCAmelCase__ , f"""na_prob_hist_{name}.png""" ) )
    plt.clf()
def a__ ( preds , scores , na_probs , qid_to_has_ans ):
    """Find the no-answer probability threshold that maximizes the given score.

    Returns:
        (best_score_pct, best_thresh): best achievable score in percent and the
        na_prob threshold achieving it.
    (Obfuscated original had duplicate parameter names, a SyntaxError; restored.)
    """
    # Start from the score obtained by answering nothing at all.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Answering an unanswerable question costs a point; staying silent is free.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def a__ ( main_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans ):
    """Record the best thresholded exact/F1 scores and their thresholds in `main_eval`.

    (Obfuscated original had duplicate parameter names, a SyntaxError, and
    discarded the destination keys; restored.)
    """
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def a__ ( ):
    """Run the full SQuAD 2.0 evaluation: load data and predictions, score,
    threshold, optionally plot, and report.

    Reads the module-level OPTS parsed in the __main__ block.
    NOTE(review): identifiers were mechanically renamed; each `UpperCAmelCase_`
    assignment and `lowerCAmelCase__` argument stands for a distinct original
    local -- restore before running.
    """
    with open(OPTS.data_file ) as f:
        UpperCAmelCase_ = json.load(lowerCAmelCase__ )
    UpperCAmelCase_ = dataset_json["data"]
    with open(OPTS.pred_file ) as f:
        UpperCAmelCase_ = json.load(lowerCAmelCase__ )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            UpperCAmelCase_ = json.load(lowerCAmelCase__ )
    else:
        # Without a no-answer file, treat every question as confidently answered.
        UpperCAmelCase_ = {k: 0.0 for k in preds}
    UpperCAmelCase_ = make_qid_to_has_ans(lowerCAmelCase__ )  # maps qid to True/False
    UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if v]
    UpperCAmelCase_ = [k for k, v in qid_to_has_ans.items() if not v]
    UpperCAmelCase_ , UpperCAmelCase_ = get_raw_scores(lowerCAmelCase__ , lowerCAmelCase__ )
    UpperCAmelCase_ = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
    UpperCAmelCase_ = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
    UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ )
    # Break out HasAns / NoAns sub-metrics when each subset is non-empty.
    if has_ans_qids:
        UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
        merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "HasAns" )
    if no_ans_qids:
        UpperCAmelCase_ = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
        merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , "NoAns" )
    if OPTS.na_prob_file:
        find_all_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir )
        histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , "hasAns" )
        histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , "noAns" )
    if OPTS.out_file:
        with open(OPTS.out_file , "w" ) as f:
            json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
    else:
        print(json.dumps(lowerCAmelCase__ , indent=2 ) )
if __name__ == "__main__":
    # Parse CLI options into the module-level OPTS. matplotlib is imported lazily
    # (with the non-interactive Agg backend) only when plots were requested.
    # NOTE(review): `parse_args`/`main`/`OPTS` are the pre-obfuscation names.
    lowerCamelCase = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("""Agg""")
        import matplotlib.pyplot as plt
    main()
| 82 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
__lowerCamelCase = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
__lowerCamelCase = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
__lowerCamelCase = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    """`datasets.Metric` wrapper around the official SQuAD v1 evaluation script."""

    def snake_case_ ( self ) -> str:
        """Declare metric metadata and the expected prediction/reference feature schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
                    """references""": {
                        """id""": datasets.Value("""string""" ),
                        """answers""": datasets.features.Sequence(
                            {
                                """text""": datasets.Value("""string""" ),
                                """answer_start""": datasets.Value("""int32""" ),
                            } ),
                    },
                } ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
        """Reshape predictions/references into the official script's dataset format, then score.

        NOTE(review): obfuscation artifacts -- the two parameters share a name (a
        SyntaxError; originally `predictions` and `references`, which the body still
        reads), the `A_` assignments stand for distinct locals (pred_dict, dataset,
        score), and `evaluate(...)` is passed an undefined `_UpperCAmelCase`.
        """
        A_ = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        A_ = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        A_ = evaluate(dataset=_UpperCAmelCase , predictions=_UpperCAmelCase )
        return score
| 288 |
"""simple docstring"""
def a__ ( nfactor , moles , volume ):
    """Convert molarity to normality: N = (moles / volume) * n-factor, rounded.

    (Obfuscated original repeated one parameter name three times, a SyntaxError;
    restored per TheAlgorithms `molarity_to_normality`.)

    >>> a__(2, 3.1, 0.31)
    20
    """
    return round(float(moles / volume) * nfactor)
def a__ ( volume , moles , temperature ):
    """Ideal-gas pressure: P = nRT / V with R = 0.0821 L·atm/(mol·K), rounded.

    (Obfuscated original repeated one parameter name three times, a SyntaxError;
    restored per TheAlgorithms `moles_to_pressure`.)

    >>> a__(0.82, 3, 300)
    90
    """
    return round(float((moles * 0.0821 * temperature) / (volume)))
def a__ ( pressure , moles , temperature ):
    """Ideal-gas volume: V = nRT / P with R = 0.0821 L·atm/(mol·K), rounded.

    (Obfuscated original repeated one parameter name three times, a SyntaxError;
    restored per TheAlgorithms `moles_to_volume`.)

    >>> a__(0.82, 3, 300)
    90
    """
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def a__ ( pressure , moles , volume ):
    """Ideal-gas temperature: T = PV / (nR) with R = 0.0821 L·atm/(mol·K), rounded.

    (Obfuscated original repeated one parameter name three times, a SyntaxError;
    restored per TheAlgorithms `pressure_and_volume_to_temperature`.)

    >>> a__(0.82, 1, 2)
    20
    """
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
    import doctest

    # Run the doctest examples embedded in the gas-law functions above.
    doctest.testmod()
| 82 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_UpperCAmelCase : Dict = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
_UpperCAmelCase : int = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
_UpperCAmelCase : Any = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
    """`datasets.Metric` computing accuracy on the MATH dataset after LaTeX canonicalization."""

    def _lowercase ( self ) -> Tuple:
        """Declare metric metadata: both predictions and references are strings."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' ),
                    '''references''': datasets.Value('''string''' ),
                } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )

    def _lowercase ( self , _snake_case , _snake_case ) -> Any:
        """Fraction of predictions judged equivalent to their reference by math_equivalence.

        NOTE(review): obfuscation artifacts -- the `_UpperCAmelCase` occurrences
        stand for distinct originals (predictions, references, then the loop
        variables i/j and len(predictions)); the `_UpperCamelCase` assignments
        were `n_correct` and `accuracy`.
        """
        _UpperCamelCase : Any = 0.0
        for i, j in zip(_UpperCAmelCase , _UpperCAmelCase ):
            n_correct += 1.0 if math_equivalence.is_equiv(_UpperCAmelCase , _UpperCAmelCase ) else 0.0
        _UpperCamelCase : List[Any] = n_correct / len(_UpperCAmelCase )
        return {
            "accuracy": accuracy,
        }
| 683 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
# WGS-84 ellipsoid: semi-major axis (m), semi-minor axis (m), equatorial radius (m).
# NOTE(review): obfuscation collapsed three distinct globals into one rebound name;
# later code refers to them as AXIS_A, AXIS_B and EQUATORIAL_RADIUS.
lowerCamelCase = 6_378_137.0
lowerCamelCase = 6_356_752.314_245
lowerCamelCase = 6_378_137
def a__ ( lat1 , lon1 , lat2 , lon2 ):
    """Approximate geodesic distance in meters via Lambert's ellipsoidal formula.

    Args are latitudes/longitudes in degrees. Relies on the module constants
    AXIS_A, AXIS_B, EQUATORIAL_RADIUS and on `haversine_distance`.
    (Obfuscated original repeated one parameter name four times, a SyntaxError,
    and collapsed the two parametric latitudes into one operand; restored.)
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lata = atan((1 - flattening) * tan(radians(lat1)))
    b_latb = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lata + b_latb) / 2
    q_value = (b_latb - b_lata) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
    import doctest

    # Run any doctest examples embedded in this module.
    doctest.testmod()
| 82 | 0 |
from collections.abc import Callable
class __magic_name__:
    """Min-heap of (item, score) pairs with O(log n) update and delete by item.

    Items are scored by ``key(item_value)`` (identity by default); ``pos_map``
    tracks each item's slot so arbitrary entries can be updated or removed.

    NOTE(review): in the corrupted original every method was named
    ``UpperCAmelCase_`` (each definition shadowing the previous one); the
    names below are restored from the internal call sites
    (``self._left``, ``self._heapify_up``, ``self.delete_item``, ...).
    """

    def __init__(self, key: Callable | None = None) -> None:
        # Heap storage: list of [item, score] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key: Callable = key or (lambda item: item)

    def _parent(self, i: int) -> int | None:
        """Index of i's parent, or None for the root."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Index of i's left child, or None if it lies outside the heap."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Index of i's right child, or None if it lies outside the heap."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Exchange slots i and j, keeping pos_map consistent."""
        # First update the indexes in the position map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """True if slot i's score is strictly smaller than slot j's."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Return the index among {i, children of i} holding the smallest score."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent
def lowerCAmelCase() -> None:
    """Placeholder entry point; intentionally does nothing."""
    pass


if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest

    doctest.testmod()
| 628 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    """Builds a tiny MraConfig plus random inputs and shape-checks every MRA head.

    NOTE(review): the corrupted original named this class ``lowercase__`` while
    the test suite below instantiates ``MraModelTester(self)``; class,
    attribute and method names are restored from those reads.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a config, as one flat tuple."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        # Pipeline tests need a larger vocabulary than the default tiny config.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs, plus cross-attention inputs."""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions (with/without masks).
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each input once per choice: (batch, num_choices, seq_len).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    """Runs the shared ModelTesterMixin suite over every MRA head.

    NOTE(review): the corrupted original inherited from the undefined name
    ``SCREAMING_SNAKE_CASE``; ``ModelTesterMixin`` (imported above) is the
    only plausible mixin base for this pattern.
    """

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # NOTE(review): the four boolean flags below were anonymous ``= False``
    # assignments; names follow the upstream MRA test module — confirm.
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    """Slow checks against reference outputs of the released uw-madison checkpoints."""

    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 82 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Force deterministic torch kernels so the pixel-level assertions below are stable.
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    """CPU-sized smoke tests for StableDiffusionUpscalePipeline with dummy modules.

    NOTE(review): both classes in the corrupted source were named
    ``_UpperCAmelCase`` (the second shadowing the first); property names are
    restored from the reads inside the test methods.
    """

    def tearDown(self):
        # Free host/GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        # np.uint8 (the source had the nonexistent np.uinta) — PIL needs byte pixels.
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests of the released x4-upscaler checkpoint against reference outputs."""

    def tearDown(self):
        # Free host/GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        # torch.float16 (the source had the nonexistent torch.floataa).
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 569 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Benchmark sizes and output path. The original bound every constant to the
# same placeholder name, leaving the names read by benchmark_iterating()
# (SPEED_TEST_N_EXAMPLES, SMALL_TEST, RESULTS_FILE_PATH) undefined.
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset, length):
    """Sequentially read the first `length` examples one by one.

    Name and parameters restored from the `functions` registry in
    benchmark_iterating(); the original repeated one placeholder name for
    both parameters (a SyntaxError).
    """
    for i in range(length):
        _ = dataset[i]
@get_duration
def read_batch(dataset, length, batch_size):
    """Read the whole dataset in slices of `batch_size` examples."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset, length, type):
    """Read `length` examples one by one under the given output format."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Read `length` examples in `batch_size` slices under the given format."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    """Generate a synthetic dataset, time every read pattern, and dump JSON results.

    Name restored from the call in the __main__ guard below; locals restored
    from the reads the corrupted original left dangling.
    """
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]

    # A reduced set is re-run after shuffling (shuffled access is slower).
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 82 | 0 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class UpperCamelCase_(PretrainedConfig):
    """
    Configuration for an MVP-style encoder-decoder model.

    Holds the architecture hyper-parameters (layer counts, hidden sizes, dropout
    rates, prompt settings) and forwards the special-token ids to
    ``PretrainedConfig.__init__``.
    """

    # identifier used by the auto-config machinery
    model_type = "mvp"
    # cached keys that should not be returned at inference time
    keys_to_ignore_at_inference = ["past_key_values"]
    # canonical attribute names -> names used by this config
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ) -> None:
        """Build the config; unknown keyword arguments are handled by the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy flag support: translate `force_bos_token_to_be_generated` into
        # `forced_bos_token_id` and warn so saved configs get fixed on re-upload.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 673 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class lowercase__(TaskTemplate):
    """Task template describing the column layout of an image-classification dataset."""

    # serialized by `asdict` even when left at its default value
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's actual ClassLabel feature.

        Raises:
            ValueError: if `label_column` is missing from `features` or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # frozen dataclass: write through __dict__ to bypass immutability
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names to the template's canonical column names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 82 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__snake_case = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return all diffusers releases published on PyPI, sorted oldest to newest."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    # sort semantically (e.g. 0.10.0 after 0.9.0), not lexicographically
    return sorted(releases, key=lambda release: version.Version(release))
def init_hf_modules():
    """Create the dynamic-modules cache, make it importable, and give it an __init__.py."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Create the sub-package `name` (and its parents) inside the dynamic-modules cache."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Return the unique names imported relatively (``import .x`` / ``from .x import y``) in `module_file`."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Recursively collect the .py files reachable from `module_file` through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Follow relative imports until a pass discovers nothing new.
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Verify every top-level package imported by `filename` is installed; return its relative imports.

    Raises:
        ImportError: listing the packages missing from the current environment.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module of absolute imports
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import `module_path` (a path below the dynamic-modules cache) and return `class_name` from it.

    When `class_name` is None, fall back to auto-detecting the single pipeline class.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Return the unique class in `loaded_module` that subclasses DiffusionPipeline.

    Classes defined inside the diffusers package itself are ignored.

    Raises:
        ValueError: if more than one such class is defined in the module.
    """
    # local import to avoid a circular dependency with ..pipelines
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Resolve `module_file` (local path, GitHub community pipeline, or Hub repo file),
    copy it plus its relative imports into the dynamic-modules cache, and return the
    path of the cached file relative to that cache.
    """
    # Download and cache module_file from the repo `pretrained_model_name_or_path` of
    # grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        # A bare name (no "/") is a community pipeline hosted on GitHub.
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,  # GitHub raw URLs never need a Hub token
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Download/cache `module_file` from a repo or community pipeline and return `class_name` from it."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 658 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCamelCase = False
class lowercase__ ( unittest.TestCase ):
    '''Placeholder for fast (non-nightly) pipeline tests; no cases are implemented.
    NOTE(review): shadowed by the @nightly class of the same name defined below.'''
    pass
@nightly
@require_torch_gpu
class lowercase__(unittest.TestCase):
    """Nightly GPU integration tests for VersatileDiffusionTextToImagePipeline."""

    def tearDown(self):
        """Free as much GPU memory as possible between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        """Saving/reloading after `remove_unused_weights` must keep the forward pass identical."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # same seed -> must reproduce the pre-save output
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        """Check a fp16 text2img run against a stored reference slice."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __SCREAMING_SNAKE_CASE(BackboneConfigMixin, PretrainedConfig):
    """
    Configuration for a NAT (Neighborhood Attention Transformer) model.

    Also implements the backbone interface so the model can be used as a feature
    extractor inside composite models.
    """

    # identifier used by the auto-config machinery
    model_type = "nat"

    # canonical attribute names -> names used by this config
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        """Build the config; unknown keyword arguments are handled by the base classes."""
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
"""simple docstring"""
def greatest_common_divisor(x, y):
    """Return gcd(x, y) via Euclid's algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x, y):
    """Return the least common multiple of x and y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n=20):
    """Project Euler 5: smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
| 82 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base structure: configuration and slow tokenizer are always importable.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

# Fast tokenizer only when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

# PyTorch modeling classes only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

# TensorFlow modeling classes only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Install a lazy module so heavy backends are only imported on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    """Copy the weights reported as missing from an old-structure (XLM)ProphetNet checkpoint
    into the new model layout and save the converted model.

    Raises:
        ValueError: if a missing key cannot be mapped and initialized.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # attention projections stored fused as in_proj_weight/in_proj_bias in the old model
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        # Walk down the attribute path in both models in parallel.
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # fall back to the new name when the old model lacks the mapped one
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
                elif hasattr(old_model, attribute):
                    old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # Old model fuses q/k/v into one in_proj tensor; split it in thirds.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 82 | 0 |
'''simple docstring'''
lowercase = "Alexander Joslin"  # author credit; annotation removed: `List` was never imported here
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation):
    """Evaluate a fully parenthesized infix expression with Dijkstra's two-stack algorithm.

    Single-digit operands only; operators are +, -, *, /.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: pop one operator and two operands, apply, push the result
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()

            # num_b was pushed first, so it is the left-hand operand
            total = operators[opr](num_b, num_a)
            operand_stack.push(total)

    # RULE 5: the final value sits on top of the operand stack
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 649 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(lowerCAmelCase__ )
for i in range(n - 1 ):
for j in range(i + 1 , lowerCAmelCase__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def a__ ( lowerCAmelCase__ ):
if len(lowerCAmelCase__ ) <= 1:
return arr, 0
UpperCAmelCase_ = len(lowerCAmelCase__ ) // 2
UpperCAmelCase_ = arr[0:mid]
UpperCAmelCase_ = arr[mid:]
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = _count_cross_inversions(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_ = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = []
UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = 0
while i < len(lowerCAmelCase__ ) and j < len(lowerCAmelCase__ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(lowerCAmelCase__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowerCAmelCase__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def a__ ( ):
UpperCAmelCase_ = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , lowerCAmelCase__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowerCAmelCase__ )
# an empty list should also have zero inversions
UpperCAmelCase_ = []
UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 82 | 0 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __snake_case (unittest.TestCase ):
    """Pipeline tests for the fill-mask task (PyTorch and TensorFlow backends).

    NOTE(review): this class shows mechanical identifier-renaming damage
    throughout — confirm every detail against the upstream fill-mask pipeline
    tests before relying on it:
    * assignment targets are the placeholder `_lowerCAmelCase` while the
      following statements read the intended names (`unmasker`, `outputs`,
      `fill_masker`, `tokenizer`, `model`, `targets`, ...), which raises
      NameError at runtime;
    * several helper signatures declare the SAME parameter name
      `_UpperCAmelCase` more than once, which is a SyntaxError in Python, so
      this module cannot even be imported as-is.
    """

    # Both class attributes bind the same mangled name, so the second
    # assignment rebinds it; upstream these are two distinct attributes
    # (the PT and TF model mappings under test).
    lowerCAmelCase__ = MODEL_FOR_MASKED_LM_MAPPING
    lowerCAmelCase__ = TF_MODEL_FOR_MASKED_LM_MAPPING

    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
        """Release as much GPU memory as possible after each test."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
        """Tiny TF model: top-2 predictions, then explicit-targets decoding."""
        _lowerCAmelCase : List[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
        _lowerCAmelCase : Any = unmasker("""My name is <mask>""" )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase , decimals=6 ) , [
                {"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 3_8015, """token_str""": """ grouped"""},
                {"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 2_5506, """token_str""": """ accuser"""},
            ] , )
        _lowerCAmelCase : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase , decimals=6 ) , [
                {
                    """sequence""": """The largest city in France is grouped""",
                    """score""": 2.1E-05,
                    """token""": 3_8015,
                    """token_str""": """ grouped""",
                },
                {
                    """sequence""": """The largest city in France is accuser""",
                    """score""": 2.1E-05,
                    """token""": 2_5506,
                    """token_str""": """ accuser""",
                },
            ] , )
        _lowerCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase , decimals=6 ) , [
                {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 1_3606, """token_str""": """ Clara"""},
                {"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3499, """token_str""": """ Patrick"""},
                {"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2941, """token_str""": """ Te"""},
            ] , )

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
        """Tiny PT model: top-2 predictions, targets, and multi-mask output."""
        _lowerCAmelCase : Optional[int] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
        _lowerCAmelCase : Optional[int] = unmasker("""My name is <mask>""" )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase , decimals=6 ) , [
                {"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 3_5676, """token_str""": """ Maul"""},
                {"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 1_6416, """token_str""": """ELS"""},
            ] , )
        _lowerCAmelCase : int = unmasker("""The largest city in France is <mask>""" )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase , decimals=6 ) , [
                {
                    """sequence""": """The largest city in France is Maul""",
                    """score""": 2.2E-05,
                    """token""": 3_5676,
                    """token_str""": """ Maul""",
                },
                {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 1_6416, """token_str""": """ELS"""},
            ] , )
        _lowerCAmelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase , decimals=6 ) , [
                {"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3499, """token_str""": """ Patrick"""},
                {"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2941, """token_str""": """ Te"""},
                {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 1_3606, """token_str""": """ Clara"""},
            ] , )
        # Two masks in one input -> one ranked list per mask position.
        _lowerCAmelCase : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase , decimals=6 ) , [
                [
                    {
                        """score""": 2.2E-05,
                        """token""": 3_5676,
                        """token_str""": """ Maul""",
                        """sequence""": """<s>My name is Maul<mask></s>""",
                    },
                    {"""score""": 2.2E-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
                ],
                [
                    {
                        """score""": 2.2E-05,
                        """token""": 3_5676,
                        """token_str""": """ Maul""",
                        """sequence""": """<s>My name is<mask> Maul</s>""",
                    },
                    {"""score""": 2.2E-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
                ],
            ] , )

    @require_torch_gpu
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
        """fp16 model on GPU still postprocesses (output cast back to fp32)."""
        _lowerCAmelCase : List[Any] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
        # convert model to fp16
        pipe.model.half()
        _lowerCAmelCase : Any = pipe("""Paris is the [MASK] of France.""" )
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )

    @slow
    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
        """Full-size distilroberta checkpoint, PyTorch backend."""
        _lowerCAmelCase : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
        self.run_large_test(_UpperCAmelCase )

    @slow
    @require_tf
    def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
        """Full-size distilroberta checkpoint, TensorFlow backend."""
        _lowerCAmelCase : Union[str, Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
        self.run_large_test(_UpperCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : List[Any] ) -> Optional[Any]:
        """Shared assertions for the slow full-size model (both backends)."""
        _lowerCAmelCase : Any = unmasker("""My name is <mask>""" )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , [
                {"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
                {"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
            ] , )
        _lowerCAmelCase : Dict = unmasker("""The largest city in France is <mask>""" )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , [
                {
                    """sequence""": """The largest city in France is Paris""",
                    """score""": 0.251,
                    """token""": 2201,
                    """token_str""": """ Paris""",
                },
                {
                    """sequence""": """The largest city in France is Lyon""",
                    """score""": 0.214,
                    """token""": 1_2790,
                    """token_str""": """ Lyon""",
                },
            ] , )
        _lowerCAmelCase : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , [
                {"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
                {"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
                {"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
            ] , )

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
        """Run the generic pipeline test-suite against the tiny PT model."""
        _lowerCAmelCase : Union[str, Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
        _lowerCAmelCase : str = None
        _lowerCAmelCase : Union[str, Any] = None
        self.run_pipeline_test(_UpperCAmelCase , [] )

    @require_tf
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
        """Run the generic pipeline test-suite against the tiny TF model."""
        _lowerCAmelCase : int = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
        _lowerCAmelCase : List[Any] = None
        _lowerCAmelCase : List[str] = None
        self.run_pipeline_test(_UpperCAmelCase , [] )

    # NOTE(review): duplicate parameter name `_UpperCAmelCase` below is a
    # SyntaxError; upstream these are (model, tokenizer, processor).
    def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
        """Build a FillMaskPipeline plus example inputs, or skip when the
        tokenizer has no mask token."""
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )

        _lowerCAmelCase : List[Any] = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
        _lowerCAmelCase : Optional[int] = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def SCREAMING_SNAKE_CASE ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
        """Shape checks (single string, list of one, list of two inputs and
        error cases), then dispatch to the focused sub-tests below."""
        _lowerCAmelCase : Dict = fill_masker.tokenizer
        _lowerCAmelCase : Union[str, Any] = fill_masker.model
        _lowerCAmelCase : Union[str, Any] = fill_masker(
            f"This is a {tokenizer.mask_token}" , )
        self.assertEqual(
            _UpperCAmelCase , [
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
            ] , )
        _lowerCAmelCase : Optional[int] = fill_masker([f"This is a {tokenizer.mask_token}"] )
        self.assertEqual(
            _UpperCAmelCase , [
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
            ] , )
        _lowerCAmelCase : int = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
        self.assertEqual(
            _UpperCAmelCase , [
                [
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                ],
                [
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                ],
            ] , )

        with self.assertRaises(_UpperCAmelCase ):
            fill_masker([None] )
        # No mask_token is not supported
        with self.assertRaises(_UpperCAmelCase ):
            fill_masker("""This is""" )

        self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase )
        self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase )
        self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase )
        self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase )
        self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int ) -> Union[str, Any]:
        """`targets=` restricts output tokens; pipeline-arg and call-arg forms
        must agree, and invalid targets must raise."""
        _lowerCAmelCase : str = tokenizer.get_vocab()
        _lowerCAmelCase : Union[str, Any] = sorted(vocab.keys() )[:2]
        # Pipeline argument
        _lowerCAmelCase : List[str] = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase )
        _lowerCAmelCase : Dict = fill_masker(f"This is a {tokenizer.mask_token}" )
        self.assertEqual(
            _UpperCAmelCase , [
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
            ] , )
        _lowerCAmelCase : Tuple = {vocab[el] for el in targets}
        self.assertEqual({el["""token"""] for el in outputs} , _UpperCAmelCase )
        _lowerCAmelCase : Optional[Any] = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["""token_str"""] for el in outputs} , set(_UpperCAmelCase ) )
        # Call argument
        _lowerCAmelCase : Tuple = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
        _lowerCAmelCase : List[str] = fill_masker(f"This is a {tokenizer.mask_token}" , targets=_UpperCAmelCase )
        self.assertEqual(
            _UpperCAmelCase , [
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
            ] , )
        _lowerCAmelCase : List[str] = {vocab[el] for el in targets}
        self.assertEqual({el["""token"""] for el in outputs} , _UpperCAmelCase )
        _lowerCAmelCase : Tuple = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el["""token_str"""] for el in outputs} , set(_UpperCAmelCase ) )
        # Score equivalence
        _lowerCAmelCase : Dict = fill_masker(f"This is a {tokenizer.mask_token}" , targets=_UpperCAmelCase )
        _lowerCAmelCase : int = [top_mask["""token_str"""] for top_mask in outputs]
        _lowerCAmelCase : Tuple = [top_mask["""score"""] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(_UpperCAmelCase ) == set(_UpperCAmelCase ):
            _lowerCAmelCase : Tuple = fill_masker(f"This is a {tokenizer.mask_token}" , targets=_UpperCAmelCase )
            _lowerCAmelCase : List[Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
        # Raises with invalid
        with self.assertRaises(_UpperCAmelCase ):
            _lowerCAmelCase : str = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(_UpperCAmelCase ):
                _lowerCAmelCase : Union[str, Any] = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""""""] )
            with self.assertRaises(_UpperCAmelCase ):
                _lowerCAmelCase : Optional[Any] = fill_masker(f"This is a {tokenizer.mask_token}" , targets="""""" )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str ) -> List[str]:
        """`top_k` as a pipeline arg and as a call arg must give equal output."""
        _lowerCAmelCase : str = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 )
        _lowerCAmelCase : Optional[Any] = fill_masker(f"This is a {tokenizer.mask_token}" )
        self.assertEqual(
            _UpperCAmelCase , [
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
            ] , )
        _lowerCAmelCase : Dict = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
        _lowerCAmelCase : Dict = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
        self.assertEqual(
            _UpperCAmelCase , [
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
            ] , )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )

    def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ) -> List[str]:
        """top_k=2 over 3 targets must match top_k=3 filtered to the best 2."""
        _lowerCAmelCase : Tuple = tokenizer.get_vocab()
        _lowerCAmelCase : List[Any] = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
        # top_k=2, ntargets=3
        _lowerCAmelCase : Tuple = sorted(vocab.keys() )[:3]
        _lowerCAmelCase : Any = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=_UpperCAmelCase )
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        _lowerCAmelCase : str = [el["""token_str"""] for el in sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(_UpperCAmelCase ).issubset(_UpperCAmelCase ):
            _lowerCAmelCase : Optional[int] = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=_UpperCAmelCase )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )

    def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] ) -> List[Any]:
        """Duplicate targets are deduplicated before top_k is applied."""
        _lowerCAmelCase : Optional[int] = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
        _lowerCAmelCase : str = tokenizer.get_vocab()
        # String duplicates + id duplicates
        _lowerCAmelCase : str = sorted(vocab.keys() )[:3]
        _lowerCAmelCase : List[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        _lowerCAmelCase : Tuple = fill_masker(f"My name is {tokenizer.mask_token}" , targets=_UpperCAmelCase , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(_UpperCAmelCase ) , 3 )

    def SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> List[Any]:
        """Three masks in one input -> three top-2 ranked lists."""
        _lowerCAmelCase : int = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
        _lowerCAmelCase : List[str] = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
        self.assertEqual(
            _UpperCAmelCase , [
                [
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                ],
                [
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                ],
                [
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                    {"""sequence""": ANY(_UpperCAmelCase ), """score""": ANY(_UpperCAmelCase ), """token""": ANY(_UpperCAmelCase ), """token_str""": ANY(_UpperCAmelCase )},
                ],
            ] , )
| 429 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
    """Return the two's-complement of a non-positive integer as a
    '0b'-prefixed binary string.

    The bit width used is one more than the minimal binary width of the
    input's magnitude (sign bit + magnitude bits).

    Raises:
        ValueError: if the input is a positive integer.
    """
    if lowerCAmelCase__ > 0:
        raise ValueError("input must be a negative integer" )
    # bin(-n) renders as "-0b1…", so stripping 3 chars leaves the magnitude
    # bits; for 0 this yields the empty string (width 0).
    width = len(bin(lowerCAmelCase__ )[3:] )
    if lowerCAmelCase__ < 0:
        # |n| - 2**width is negative; the bits of its magnitude are the low
        # bits of the two's complement. Prepend the sign bit and zero padding.
        low_bits = bin(abs(lowerCAmelCase__ ) - (1 << width) )[3:]
        result = "1" + "0" * (width - len(low_bits )) + low_bits
    else:
        # Zero is its own two's complement.
        result = "0"
    return "0b" + result
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 82 | 0 |
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowerCAmelCase__ = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class snake_case__:
    """A semantic-version value object ("x.y.z") with ordering and hashing.

    NOTE(review): this class carries identifier-mangling damage — confirm
    against the upstream `datasets` Version class before relying on it:
    * all five class attributes bind the SAME name ``lowercase_`` and have no
      type annotations, so the dataclass has no fields at all;
    * methods read ``self.version_str`` / ``self.tuple`` / ``self.major`` etc.
      which nothing visibly assigns;
    * ``_str_to_version_tuple`` and ``_version_tuple_to_str`` are not defined
      at module level (the two helpers below are both named
      ``__lowerCamelCase``).
    """

    # Repeated rebinding of the same mangled name; upstream these are the
    # distinct fields version_str / major / minor / patch and a cached tuple.
    lowercase_ = 4_2
    lowercase_ = None
    lowercase_ = None
    lowercase_ = None
    lowercase_ = None

    def snake_case ( self : str ):
        # Presumably __post_init__: parse version_str into components.
        # NOTE(review): all three unpack targets share one mangled name.
        lowercase__ , lowercase__ , lowercase__ : List[str] = _str_to_version_tuple(self.version_str )

    def __repr__( self : Any ):
        # Render as "major.minor.patch".
        return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""

    @property
    def snake_case ( self : Optional[int] ):
        # (major, minor, patch) tuple view used by the comparisons below.
        return self.major, self.minor, self.patch

    def snake_case ( self : Any , SCREAMING_SNAKE_CASE : str ):
        # Coerce the comparison operand to a Version, pass one through,
        # or reject with TypeError.
        if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            return Version(_UpperCAmelCase )
        elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            return other
        raise TypeError(f"""{other} (type {type(_UpperCAmelCase )}) cannot be compared to version.""" )

    def __eq__( self : Any , SCREAMING_SNAKE_CASE : Dict ):
        # Equality via tuple comparison; non-coercible operands compare unequal.
        try:
            lowercase__ : List[Any] = self._validate_operand(_UpperCAmelCase )
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__( self : Optional[int] , SCREAMING_SNAKE_CASE : int ):
        # total_ordering derives the remaining comparisons from __eq__/__lt__.
        lowercase__ : Optional[int] = self._validate_operand(_UpperCAmelCase )
        return self.tuple < other.tuple

    def __hash__( self : Union[str, Any] ):
        # Hash the canonical string form so equal versions hash equally.
        return hash(_version_tuple_to_str(self.tuple ) )

    @classmethod
    def snake_case ( cls : Any , SCREAMING_SNAKE_CASE : Optional[int] ):
        # Alternate constructor from a dict, keeping only dataclass fields.
        # NOTE(review): reads `dic` and `field_names`, neither of which is
        # bound under these mangled names.
        lowercase__ : str = {f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in dic.items() if k in field_names} )

    def snake_case ( self : List[str] ):
        # Plain string form of the version.
        return self.version_str
def __lowerCamelCase ( lowerCamelCase__ ):
    """Parse a version string ``"x.y.z"`` into a ``(major, minor, patch)``
    tuple of ints.

    Uses the module-level compiled pattern (``lowerCAmelCase__``) defined
    above. Fixes applied: the original assigned the match to a placeholder
    name but read ``res``, matched against undefined ``_VERSION_REG``,
    formatted an undefined ``version_str`` into the error message, and
    converted the wrong object (the loop produced ``int`` of the module
    regex instead of each captured group).

    NOTE(review): a second function below re-uses the name
    ``__lowerCamelCase`` and therefore shadows this one at module level —
    likely renaming damage in the original file.

    Raises:
        ValueError: if the string does not match ``major.minor.patch``.
    """
    res = lowerCAmelCase__.match(lowerCamelCase__ )
    if not res:
        raise ValueError(F"""Invalid version '{lowerCamelCase__}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
    return tuple(int(v ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def __lowerCamelCase ( lowerCamelCase__ ):
    """Render a version tuple such as ``(1, 2, 3)`` back into ``"1.2.3"``.

    Fixes applied: the original iterated over an undefined ``version_tuple``
    and stringified the module-level regex instead of each element; it now
    joins ``str(v)`` for each element of the argument.
    """
    return ".".join(str(v ) for v in lowerCamelCase__ )
| 496 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class lowercase__ ( SCREAMING_SNAKE_CASE ):
    """Zero-shot object-detection pipeline: scores free-text candidate labels
    against an image and returns bounding boxes.

    NOTE(review): this class carries identifier-mangling damage — confirm
    against the upstream transformers ZeroShotObjectDetectionPipeline:
    * the base class and decorator argument are the undefined placeholder
      ``SCREAMING_SNAKE_CASE`` (upstream: PIPELINE_INIT_ARGS / ChunkPipeline);
    * all five methods below share the name ``lowercase__``, so only the last
      definition would survive on the class;
    * assignments target ``UpperCAmelCase_`` while following lines read the
      intended names (``inputs``, ``postprocess_params``, ``results``,
      ``xmin`` ...), which raises NameError;
    * ``__call__`` declares the parameter name ``_UpperCAmelCase`` three
      times, which is a SyntaxError.
    """

    def __init__( self : Union[str, Any] , **_UpperCAmelCase : Optional[int] ) -> str:
        """Reject the TF framework, require the vision backend, and restrict
        the accepted model types."""
        super().__init__(**_UpperCAmelCase )

        if self.framework == "tf":
            raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )

        requires_backends(self , "vision" )
        # NOTE(review): upstream passes the model-mapping constant here, not
        # the kwargs dict — confirm.
        self.check_model_type(_UpperCAmelCase )

    def __call__( self : int , _UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , _UpperCAmelCase : Union[str, List[str]] = None , **_UpperCAmelCase : Optional[int] , ) -> List[Any]:
        """Normalize (image, candidate_labels) into the dict shape the chunk
        pipeline expects, then delegate to the base __call__."""
        if "text_queries" in kwargs:
            UpperCAmelCase_ = kwargs.pop("text_queries" )

        if isinstance(_UpperCAmelCase , (str, Image.Image) ):
            # Single image + labels -> wrap into one request dict.
            UpperCAmelCase_ = {"image": image, "candidate_labels": candidate_labels}
        else:
            # Already a (list of) request dict(s).
            UpperCAmelCase_ = image
        UpperCAmelCase_ = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
        return results

    def lowercase__ ( self : str , **_UpperCAmelCase : List[Any] ) -> List[str]:
        """Split kwargs into (preprocess, forward, postprocess) parameter
        dicts; only postprocess takes `threshold` and `top_k`.

        NOTE(review): the returned ``postprocess_params`` is never defined
        here because the assignments were renamed to a placeholder.
        """
        UpperCAmelCase_ = {}
        if "threshold" in kwargs:
            UpperCAmelCase_ = kwargs["threshold"]
        if "top_k" in kwargs:
            UpperCAmelCase_ = kwargs["top_k"]
        return {}, {}, postprocess_params

    def lowercase__ ( self : int , _UpperCAmelCase : int ) -> Any:
        """Yield one model-input dict per candidate label (chunked
        preprocessing), tagging the last chunk with `is_last`."""
        UpperCAmelCase_ = load_image(inputs["image"] )
        UpperCAmelCase_ = inputs["candidate_labels"]
        if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            # Comma-separated string form of the labels.
            UpperCAmelCase_ = candidate_labels.split("," )

        # NOTE(review): `torch.intaa` looks like digit-mangled `torch.int32`
        # (original image size used for box rescaling) — confirm.
        UpperCAmelCase_ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
        for i, candidate_label in enumerate(_UpperCAmelCase ):
            UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , return_tensors=self.framework )
            UpperCAmelCase_ = self.image_processor(_UpperCAmelCase , return_tensors=self.framework )
            yield {
                "is_last": i == len(_UpperCAmelCase ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def lowercase__ ( self : int , _UpperCAmelCase : List[Any] ) -> int:
        """Run the model on one chunk, carrying the bookkeeping keys
        (target_size, candidate_label, is_last) through to postprocessing."""
        UpperCAmelCase_ = model_inputs.pop("target_size" )
        UpperCAmelCase_ = model_inputs.pop("candidate_label" )
        UpperCAmelCase_ = model_inputs.pop("is_last" )

        UpperCAmelCase_ = self.model(**_UpperCAmelCase )

        UpperCAmelCase_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def lowercase__ ( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : List[str]=None ) -> int:
        """Convert per-label detections into {score, label, box} dicts, sort
        by descending score, and optionally truncate to top_k."""
        UpperCAmelCase_ = []
        for model_output in model_outputs:
            UpperCAmelCase_ = model_output["candidate_label"]
            UpperCAmelCase_ = BaseModelOutput(_UpperCAmelCase )
            UpperCAmelCase_ = self.image_processor.post_process_object_detection(
                outputs=_UpperCAmelCase , threshold=_UpperCAmelCase , target_sizes=model_output["target_size"] )[0]

            for index in outputs["scores"].nonzero():
                UpperCAmelCase_ = outputs["scores"][index].item()
                UpperCAmelCase_ = self._get_bounding_box(outputs["boxes"][index][0] )

                UpperCAmelCase_ = {"score": score, "label": label, "box": box}
                results.append(_UpperCAmelCase )

        UpperCAmelCase_ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x["score"] , reverse=_UpperCAmelCase )
        if top_k:
            UpperCAmelCase_ = results[:top_k]

        return results

    def lowercase__ ( self : str , _UpperCAmelCase : "torch.Tensor" ) -> Dict[str, int]:
        """Turn an (xmin, ymin, xmax, ymax) box tensor into a dict of ints.

        NOTE(review): the 4-way unpack below assigns all four values to one
        placeholder name, then reads ``xmin``/``ymin``/``xmax``/``ymax``.
        """
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = box.int().tolist()
        UpperCAmelCase_ = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 82 | 0 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( _snake_case , unittest.TestCase ):
    """Tokenizer tests for the (French) Barthez sentencepiece tokenizer.

    NOTE(review): identifier-mangling damage — confirm against the upstream
    Barthez tokenizer tests:
    * base class ``_snake_case`` is undefined (upstream:
      TokenizerTesterMixin);
    * the four class attributes all bind the same name ``lowercase``, so
      later assignments overwrite earlier ones;
    * assignments target ``A_`` while following lines read the intended
      names (``tokenizer``, ``vocab_keys``, ``batch`` ...), and several
      method bodies read ``_UpperCAmelCase`` which is not in scope — both
      raise NameError at runtime.
    """

    lowercase = BarthezTokenizer
    lowercase = BarthezTokenizerFast
    lowercase = True
    lowercase = True

    def snake_case_ ( self ) -> int:
        """Save a pretrained fast tokenizer into the test tmp dir (both
        formats) and keep it for the tests."""
        super().setUp()

        A_ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=_UpperCAmelCase )
        A_ = tokenizer

    def snake_case_ ( self ) -> int:
        """<pad> maps to id 1 in both directions."""
        A_ = """<pad>"""
        A_ = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )

    def snake_case_ ( self ) -> Any:
        """Vocab starts with <s>, <pad>; ends with <mask>; has 101122 entries."""
        A_ = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(_UpperCAmelCase ) , 101122 )

    def snake_case_ ( self ) -> str:
        """vocab_size property agrees with the vocabulary length."""
        self.assertEqual(self.get_tokenizer().vocab_size , 101122 )

    @require_torch
    def snake_case_ ( self ) -> List[str]:
        """Batch-encode to PyTorch tensors: shapes and first row of ids."""
        A_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        A_ = [0, 57, 3018, 70307, 91, 2]

        A_ = self.tokenizer(
            _UpperCAmelCase , max_length=len(_UpperCAmelCase ) , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )

        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        A_ = batch.input_ids.tolist()[0]
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )

    def snake_case_ ( self ) -> Optional[int]:
        """Slow and rust tokenizers produce identical tokens and ids."""
        if not self.test_rust_tokenizer:
            return

        A_ = self.get_tokenizer()
        A_ = self.get_rust_tokenizer()

        A_ = """I was born in 92000, and this is falsé."""

        A_ = tokenizer.tokenize(_UpperCAmelCase )
        A_ = rust_tokenizer.tokenize(_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )

        A_ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
        A_ = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )

        A_ = self.get_rust_tokenizer()
        A_ = tokenizer.encode(_UpperCAmelCase )
        A_ = rust_tokenizer.encode(_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )

    @slow
    def snake_case_ ( self ) -> int:
        """Integration test pinning exact encodings for two French sentences
        against a fixed model revision."""
        A_ = {"""input_ids""": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        A_ = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=_UpperCAmelCase , )
| 288 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
    """Test-harness helper that builds tiny ViT configs and random inputs for the TF tests.

    NOTE(review): every assignment in ``__init__`` binds the throwaway local
    ``UpperCAmelCase_`` instead of an attribute on ``self``, and the bodies
    reference bare names (``parent``, ``batch_size``, ...) that are not the
    actual parameter names (all parameters are called ``_UpperCAmelCase``).
    As written this class raises ``NameError``/``AttributeError``; it appears
    to be mechanically renamed from the upstream ``TFViTModelTester`` and
    should be restored from it.
    """
    def __init__( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=30 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : str=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : List[Any]=10 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Dict=None , ) -> str:
        """Record the (tiny) model hyper-parameters the tests build configs from."""
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = image_size
        UpperCAmelCase_ = patch_size
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = is_training
        UpperCAmelCase_ = use_labels
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = type_sequence_label_size
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase_ = (image_size // patch_size) ** 2
        UpperCAmelCase_ = num_patches + 1
    def lowercase__ ( self : Dict ) -> Optional[Any]:
        """Create random pixel values (and labels when ``use_labels``) plus a config."""
        UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase_ = None
        if self.use_labels:
            UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCAmelCase_ = self.get_config()
        return config, pixel_values, labels
    def lowercase__ ( self : int ) -> Dict:
        """Build a small ViTConfig from the stored hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
    def lowercase__ ( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> List[str]:
        """Run TFViTModel on full-size and half-size images; check output shapes."""
        UpperCAmelCase_ = TFViTModel(config=_UpperCAmelCase )
        UpperCAmelCase_ = model(_UpperCAmelCase , training=_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        UpperCAmelCase_ = self.image_size // 2
        UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
        UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
        # Smaller image => fewer patches; +1 accounts for the [CLS] token.
        UpperCAmelCase_ = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def lowercase__ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> List[Any]:
        """Run TFViTForImageClassification, including resized and greyscale inputs."""
        UpperCAmelCase_ = self.type_sequence_label_size
        UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
        UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        UpperCAmelCase_ = self.image_size // 2
        UpperCAmelCase_ = pixel_values[:, :, :image_size, :image_size]
        UpperCAmelCase_ = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = TFViTForImageClassification(_UpperCAmelCase )
        UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase_ = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowercase__ ( self : Any ) -> Optional[int]:
        """Return (config, inputs_dict) in the format the common test mixin expects."""
        UpperCAmelCase_ = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
        UpperCAmelCase_ = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common TF model-test suite specialised for ViT.

    NOTE(review): the four ``UpperCamelCase`` class attributes all share one
    name, so only the last assignment survives at runtime — upstream these
    are ``all_model_classes``, ``pipeline_model_mapping``, ``test_resize_embeddings``
    etc. Likewise ``setUp`` binds locals instead of ``self.model_tester`` /
    ``self.config_tester``. Mechanically renamed; compare with upstream
    ``TFViTModelTest`` before relying on it.
    """
    UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    UpperCamelCase = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    UpperCamelCase = False
    UpperCamelCase = False
    UpperCamelCase = False
    def lowercase__ ( self : Any ) -> List[Any]:
        """Set up the model tester and the config tester (ViT has no text modality)."""
        UpperCAmelCase_ = TFViTModelTester(self )
        UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
    def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
        """Run the shared ViTConfig sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds" )
    def lowercase__ ( self : Optional[Any] ) -> List[Any]:
        """Skipped: ViT consumes pixel values, not input embeddings."""
        pass
    @unittest.skip(reason="ViT does not use inputs_embeds" )
    def lowercase__ ( self : List[str] ) -> List[Any]:
        """Skipped: ViT consumes pixel values, not input embeddings."""
        pass
    def lowercase__ ( self : str ) -> str:
        """Check input/output embedding accessors return Keras layers (or None)."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(_UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            UpperCAmelCase_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_UpperCAmelCase , tf.keras.layers.Layer ) )
    def lowercase__ ( self : Tuple ) -> str:
        """Check each model's call() signature starts with `pixel_values`."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(_UpperCAmelCase )
            UpperCAmelCase_ = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ = [*signature.parameters.keys()]
            UpperCAmelCase_ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
    def lowercase__ ( self : List[Any] ) -> str:
        """Exercise the base model forward pass via the model tester."""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )
    def lowercase__ ( self : Optional[int] ) -> List[str]:
        """Exercise the image-classification head via the model tester."""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
    @slow
    def lowercase__ ( self : int ) -> Tuple:
        """Smoke-test loading the published checkpoint from the Hub."""
        UpperCAmelCase_ = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
        self.assertIsNotNone(_UpperCAmelCase )
def a__ ( ):
    """Load and return the COCO cats fixture image used by the slow integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
    """Slow integration test: classify the fixture image with the released ViT checkpoint."""
    @cached_property
    def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
        """Return the checkpoint's image processor, or None when vision deps are absent."""
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
    @slow
    def lowercase__ ( self : int ) -> Any:
        """Run a full forward pass and compare the first logits to reference values."""
        UpperCAmelCase_ = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="tf" )
        # forward pass
        UpperCAmelCase_ = model(**_UpperCAmelCase )
        # verify the logits
        UpperCAmelCase_ = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
        # Reference logits for the first three ImageNet classes.
        UpperCAmelCase_ = tf.constant([-0.2744, 0.8215, -0.0836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 )
| 82 | 0 |
"""Lazy-import shim for the (deprecated) TrajectoryTransformer model."""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public symbols it exports; consumed by _LazyModule so
# the heavy imports are deferred until an attribute is first accessed.
# BUG FIX: the dict and the modeling list used to be bound to a throwaway
# name while the _LazyModule call referenced `_import_structure`, which
# raised NameError on import; everything now uses `_import_structure`.
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

# The modeling submodule requires torch; only advertise it when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )
else:
    import sys

    # Replace this module with a lazy proxy at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 683 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
# Local file names under which the vocabulary artefacts are saved.
# NOTE(review): all four module constants below share the single name
# `lowerCamelCase`, so each assignment clobbers the previous one — upstream
# these are `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`; restore the distinct names.
lowerCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
# Download URLs of the vocab/merges files for each published checkpoint.
lowerCamelCase = {
    """vocab_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
    },
}
# Maximum input lengths (positional-embedding sizes) per checkpoint.
lowerCamelCase = {
    """facebook/bart-base""": 1_024,
    """facebook/bart-large""": 1_024,
    """facebook/bart-large-mnli""": 1_024,
    """facebook/bart-large-cnn""": 1_024,
    """facebook/bart-large-xsum""": 1_024,
    """yjernite/bart_eli5""": 1_024,
}
@lru_cache()
def a__ ( ):
    """Return a dict mapping every byte value (0-255) to a printable unicode char.

    Printable latin-1 bytes map to themselves; the remaining byte values are
    remapped to code points starting at 256, so that no byte maps to
    whitespace or a control character (which would break byte-level BPE).

    BUG FIX: the original bound the working list and counter to a throwaway
    name, leaving `cs` and `n` undefined, so the function raised NameError.
    """
    # Byte values that are already safe, printable characters.
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            # Shift unprintable bytes into the range starting at 256.
            cs.append(2**8 + n )
            n += 1
    cs = [chr(code ) for code in cs]
    return dict(zip(bs , cs ) )
def a__ ( lowerCAmelCase__ ):
    """Return the set of adjacent symbol pairs in the word `lowerCAmelCase__`.

    The word is any sequence of symbols (e.g. a tuple of strings); the result
    is a set of (previous_symbol, symbol) tuples, as used for BPE merge-rank
    lookups.

    BUG FIX: the original bound its working values to a throwaway name and
    then read undefined names (`pairs`, `word`, `prev_char`), raising
    NameError; it also crashed on an empty word, which now yields an empty
    set.
    """
    if not lowerCAmelCase__:
        return set()
    pairs = set()
    prev_char = lowerCAmelCase__[0]
    for char in lowerCAmelCase__[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class lowercase__ ( SCREAMING_SNAKE_CASE ):
    """Byte-level BPE tokenizer (GPT-2 style), as used by BART.

    NOTE(review): throughout this class, values that the upstream
    ``BartTokenizer`` stores on ``self`` (``self.encoder``,
    ``self.byte_encoder``, ``self.bpe_ranks``, ``self.cache`` ...) are bound
    to the throwaway local ``UpperCAmelCase_`` instead, while later lines
    still read the ``self.*`` attributes; several expressions also reference
    names that are never defined here (e.g. ``bytes_to_unicode``,
    ``save_directory``, ``kv`` in ``save_vocabulary``). The structure matches
    upstream; restore the attribute assignments from it before use. Also the
    four ``UpperCamelCase`` class attributes share one name, so only the last
    assignment survives.
    """
    UpperCamelCase = VOCAB_FILES_NAMES
    UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase = ['''input_ids''', '''attention_mask''']
    def __init__( self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]="replace" , _UpperCAmelCase : Any="<s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : Optional[int]="<unk>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : List[Any]="<mask>" , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : Dict , ) -> Union[str, Any]:
        """Load vocab/merges files, wrap special tokens as AddedToken and set up BPE state."""
        UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
        UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
        UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
        UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
        UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
        UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
        super().__init__(
            errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
        with open(_UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
            UpperCAmelCase_ = json.load(_UpperCAmelCase )
        UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
        UpperCAmelCase_ = errors  # how to handle errors in decoding
        UpperCAmelCase_ = bytes_to_unicode()
        UpperCAmelCase_ = {v: k for k, v in self.byte_encoder.items()}
        with open(_UpperCAmelCase , encoding="utf-8" ) as merges_handle:
            UpperCAmelCase_ = merges_handle.read().split("\n" )[1:-1]
        UpperCAmelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
        UpperCAmelCase_ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
        UpperCAmelCase_ = {}
        UpperCAmelCase_ = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        UpperCAmelCase_ = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    def lowercase__ ( self : int ) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder )
    def lowercase__ ( self : int ) -> Optional[int]:
        """Return the full token->id mapping, including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )
    def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Any ) -> Optional[Any]:
        """Apply (and memoise) byte-pair encoding to a single pre-token."""
        if token in self.cache:
            return self.cache[token]
        UpperCAmelCase_ = tuple(_UpperCAmelCase )
        UpperCAmelCase_ = get_pairs(_UpperCAmelCase )
        if not pairs:
            return token
        while True:
            # Greedily merge the lowest-ranked (most frequent) pair first.
            UpperCAmelCase_ = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCAmelCase_ , UpperCAmelCase_ = bigram
            UpperCAmelCase_ = []
            UpperCAmelCase_ = 0
            while i < len(_UpperCAmelCase ):
                try:
                    UpperCAmelCase_ = word.index(_UpperCAmelCase , _UpperCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCAmelCase_ = j
                if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCAmelCase_ = tuple(_UpperCAmelCase )
            UpperCAmelCase_ = new_word
            if len(_UpperCAmelCase ) == 1:
                break
            else:
                UpperCAmelCase_ = get_pairs(_UpperCAmelCase )
        UpperCAmelCase_ = " ".join(_UpperCAmelCase )
        UpperCAmelCase_ = word
        return word
    def lowercase__ ( self : Dict , _UpperCAmelCase : str ) -> Optional[int]:
        """Split text with the pre-tokenization regex, byte-encode, then BPE each piece."""
        UpperCAmelCase_ = []
        for token in re.findall(self.pat , _UpperCAmelCase ):
            UpperCAmelCase_ = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(" " ) )
        return bpe_tokens
    def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> int:
        """Map a token string to its id (falling back to the unk token's id)."""
        return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
    def lowercase__ ( self : Tuple , _UpperCAmelCase : int ) -> int:
        """Map an id back to its token string."""
        return self.decoder.get(_UpperCAmelCase )
    def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> Dict:
        """Join tokens and undo the byte-to-unicode mapping to recover the text."""
        UpperCAmelCase_ = "".join(_UpperCAmelCase )
        UpperCAmelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; return their paths."""
        if not os.path.isdir(_UpperCAmelCase ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase_ = os.path.join(
            _UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        UpperCAmelCase_ = os.path.join(
            _UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + "\n" )
        UpperCAmelCase_ = 0
        with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # Merges must be written in rank order so they can be re-read correctly.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    UpperCAmelCase_ = token_index
                writer.write(" ".join(_UpperCAmelCase ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def lowercase__ ( self : str , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        """Add BART special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCAmelCase_ = [self.cls_token_id]
        UpperCAmelCase_ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def lowercase__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(_UpperCAmelCase )) + [1]
        return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
    def lowercase__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        """Return all-zero token-type ids (BART does not use token types)."""
        UpperCAmelCase_ = [self.sep_token_id]
        UpperCAmelCase_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def lowercase__ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : int ) -> Union[str, Any]:
        """Optionally prepend a space so the first word is BPE-merged like mid-sentence words."""
        UpperCAmelCase_ = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()):
            UpperCAmelCase_ = " " + text
        return (text, kwargs)
| 82 | 0 |
"""simple docstring"""
def snake_case_ ( A_ : list[int] ) -> list[int]:
    """Sort the list `A_` in place using exchange sort and return it.

    O(n^2) comparisons; the returned list is in ascending order.

    >>> snake_case_([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> snake_case_([])
    []

    BUG FIX: the original bound the swapped values to throwaway locals
    instead of writing them back into the list (so nothing was sorted), and
    its loop bound referenced an undefined name `numbers`.
    """
    length = len(A_ )
    for i in range(A_.__len__() if False else length ):
        for j in range(i + 1, length ):
            if A_[j] < A_[i]:
                # Swap the elements inside the list itself.
                A_[i], A_[j] = A_[j], A_[i]
    return A_
if __name__ == "__main__":
    # NOTE(review): the right-hand sides below reference `user_input`,
    # `unsorted` and `exchange_sort`, none of which are defined in this file —
    # both assignments bind the name `lowerCAmelCase__` instead. As written
    # this block raises NameError; restore the intended variable/function
    # names before running as a script.
    lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
    lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
    print(exchange_sort(unsorted))
| 83 |
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : list[list[int]] ) -> int:
    """Return the minimum path sum from top-left to bottom-right of grid `A_`,
    moving only right or down. The grid is updated in place so each cell ends
    up holding the cheapest cost of reaching it.

    >>> snake_case_([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7

    BUG FIX: the original body referenced an undefined name `matrix` while
    the parameter is called `A_`, raising NameError on every call.
    """
    # First row can only be reached from the left: running prefix sums.
    for i in range(1, len(A_[0] ) ):
        A_[0][i] += A_[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(A_ ) ):
        A_[i][0] += A_[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(A_ ) ):
        for j in range(1, len(A_[0] ) ):
            A_[i][j] += min(A_[i - 1][j], A_[i][j - 1] )
    return A_[-1][-1]
if __name__ == "__main__":
    # Execute any doctests embedded in this module when run as a script.
    import doctest
    doctest.testmod()
| 83 | 1 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
lowerCAmelCase__ = logging.get_logger(__name__)
@dataclass
class __snake_case :
    """CLI/dataclass arguments controlling GLUE data preprocessing.

    NOTE(review): the method below is presumably upstream's ``__post_init__``
    (it lower-cases ``task_name``), but here it binds a throwaway local
    instead of ``self.task_name``, so the normalisation is lost — confirm
    against the upstream ``GlueDataTrainingArguments``. The four fields also
    all share the name ``snake_case__``, so only the last one survives.
    """
    snake_case__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    snake_case__ : str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    snake_case__ : int = field(
        default=1_2_8 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    snake_case__ : bool = field(
        default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"})
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Normalise the task name to lower case (intended as __post_init__)."""
        _lowerCamelCase : Optional[int] = self.task_name.lower()
class __snake_case ( _lowercase):
    """Dataset split identifiers (train/dev/test).

    NOTE(review): all three members share the single name ``snake_case__``,
    so only the last assignment ("test") survives — upstream these are the
    ``train``/``dev``/``test`` members of an ``Enum`` named ``Split``.
    """
    snake_case__ : Union[str, Any] = "train"
    snake_case__ : Tuple = "dev"
    snake_case__ : int = "test"
class __snake_case ( _lowercase):
    """Deprecated torch ``Dataset`` that tokenizes one GLUE split, with on-disk caching.

    NOTE(review): several values upstream stores on ``self`` (``self.args``,
    ``self.processor``, ``self.output_mode``, ``self.features``,
    ``self.label_list``) are bound here to the throwaway local
    ``_lowerCamelCase``, yet later lines read the ``self.*`` attributes —
    mechanical renaming damage; compare with the upstream ``GlueDataset``
    before relying on this class.
    """
    snake_case__ : GlueDataTrainingArguments
    snake_case__ : str
    snake_case__ : List[InputFeatures]
    def __init__( self : str , __lowerCAmelCase : GlueDataTrainingArguments , __lowerCAmelCase : PreTrainedTokenizerBase , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Union[str, Split] = Split.train , __lowerCAmelCase : Optional[str] = None , ):
        """Build (or load from cache) the tokenized features for one GLUE split."""
        warnings.warn(
            '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
            '''library. You can have a look at this example script for pointers: '''
            '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __lowerCAmelCase , )
        _lowerCamelCase : str = args
        _lowerCamelCase : Union[str, Any] = glue_processors[args.task_name]()
        _lowerCamelCase : Tuple = glue_output_modes[args.task_name]
        if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
            try:
                _lowerCamelCase : List[Any] = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        # Load data features from cache or dataset file
        _lowerCamelCase : List[str] = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
        _lowerCamelCase : List[Any] = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            _lowerCamelCase , _lowerCamelCase : Dict = label_list[2], label_list[1]
        _lowerCamelCase : int = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        _lowerCamelCase : List[Any] = cached_features_file + '''.lock'''
        with FileLock(__lowerCAmelCase ):
            if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
                _lowerCamelCase : Optional[int] = time.time()
                _lowerCamelCase : Optional[Any] = torch.load(__lowerCAmelCase )
                logger.info(
                    f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
            else:
                logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
                if mode == Split.dev:
                    _lowerCamelCase : Optional[int] = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    _lowerCamelCase : int = self.processor.get_test_examples(args.data_dir )
                else:
                    _lowerCamelCase : List[str] = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    _lowerCamelCase : int = examples[:limit_length]
                _lowerCamelCase : Any = glue_convert_examples_to_features(
                    __lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , )
                _lowerCamelCase : Union[str, Any] = time.time()
                torch.save(self.features , __lowerCAmelCase )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
    def __len__( self : List[Any] ):
        """Number of tokenized examples in this split."""
        return len(self.features )
    def __getitem__( self : Optional[int] , __lowerCAmelCase : Dict ):
        """Return the i-th InputFeatures."""
        return self.features[i]
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Return the task's label list."""
        return self.label_list
| 83 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
class __snake_case ( Generic[T]):
    """Singly-linked node holding one payload of type T.

    NOTE(review): ``__init__`` binds ``_lowerCamelCase`` locals instead of
    ``self.data`` / ``self.next``, and references ``data`` although the
    parameter is named ``__lowerCAmelCase`` — so ``__str__`` (which reads
    ``self.data``) fails. Upstream this is the linked-list ``Node`` class,
    which the Stack class below also expects to find under the name ``Node``.
    """
    def __init__( self : int , __lowerCAmelCase : T ):
        """Store the payload and a null next-pointer (intended behaviour)."""
        _lowerCamelCase : Optional[int] = data
        _lowerCamelCase : Node[T] | None = None
    def __str__( self : Optional[Any] ):
        """Render the node as its payload."""
        return f'''{self.data}'''
class __snake_case ( Generic[T]):
    """Linked-list based LIFO stack.

    NOTE(review): like the node class above, attribute writes are bound to
    the throwaway local ``_lowerCamelCase`` instead of ``self.top``, and
    ``push`` calls ``Node(...)`` even though no name ``Node`` is defined in
    this module (both classes here are called ``__snake_case``). The shape
    matches the upstream linked-list ``Stack``; restore the names and the
    ``self.*`` assignments from it before use.
    """
    def __init__( self : int ):
        """Create an empty stack (intended: self.top = None)."""
        _lowerCamelCase : Node[T] | None = None
    def __iter__( self : str ):
        """Yield payloads from top to bottom."""
        _lowerCamelCase : List[str] = self.top
        while node:
            yield node.data
            _lowerCamelCase : Any = node.next
    def __str__( self : int ):
        """Render as 'top->...->bottom'."""
        return "->".join([str(__lowerCAmelCase ) for item in self] )
    def __len__( self : int ):
        """Number of items currently on the stack."""
        return len(tuple(iter(self ) ) )
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Return True when the stack has no items."""
        return self.top is None
    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : T ):
        """Push a value on top of the stack."""
        _lowerCamelCase : Tuple = Node(__lowerCAmelCase )
        if not self.is_empty():
            _lowerCamelCase : Optional[int] = self.top
        _lowerCamelCase : List[str] = node
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Remove and return the top value; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('''pop from empty stack''' )
        assert isinstance(self.top , __lowerCAmelCase )
        _lowerCamelCase : Any = self.top
        _lowerCamelCase : Any = self.top.next
        return pop_node.data
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Return the top value without removing it; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('''peek from empty stack''' )
        assert self.top is not None
        return self.top.data
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Drop every item (intended: self.top = None)."""
        _lowerCamelCase : List[str] = None
if __name__ == "__main__":
    # Run the module's embedded doctests when executed directly.
    from doctest import testmod
    testmod()
| 83 | 1 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCAmelCase__ = logging.get_logger(__name__)
# Local file names under which the Jukebox vocabularies are saved.
# NOTE(review): the three module constants below all share the single name
# `lowerCAmelCase__`, so each assignment clobbers the previous one — upstream
# these are `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_LYRIC_TOKENS_SIZES` (which the tokenizer class references).
lowerCAmelCase__ = {
    '''artists_file''': '''artists.json''',
    '''lyrics_file''': '''lyrics.json''',
    '''genres_file''': '''genres.json''',
}
# Download URLs of the vocab files for the published checkpoint.
lowerCAmelCase__ = {
    '''artists_file''': {
        '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
    },
    '''genres_file''': {
        '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
    },
    '''lyrics_file''': {
        '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
    },
}
# Maximum number of lyric tokens per checkpoint.
lowerCAmelCase__ = {
    '''jukebox''': 512,
}
class __snake_case ( _lowercase):
snake_case__ : List[Any] = VOCAB_FILES_NAMES
snake_case__ : int = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
snake_case__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self : str , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int]=["v3", "v2", "v2"] , __lowerCAmelCase : Any=5_1_2 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : List[str]="<|endoftext|>" , **__lowerCAmelCase : Any , ):
"""simple docstring"""
_lowerCamelCase : List[Any] = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else unk_token
super().__init__(
unk_token=__lowerCAmelCase , n_genres=__lowerCAmelCase , version=__lowerCAmelCase , max_n_lyric_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : List[str] = version
_lowerCamelCase : List[str] = max_n_lyric_tokens
_lowerCamelCase : Optional[Any] = n_genres
with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
_lowerCamelCase : Tuple = json.load(__lowerCAmelCase )
with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
_lowerCamelCase : Any = json.load(__lowerCAmelCase )
with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
_lowerCamelCase : List[Any] = json.load(__lowerCAmelCase )
_lowerCamelCase : List[str] = R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
_lowerCamelCase : Optional[Any] = oov.replace(R'''\-\'''' , R'''\-+\'''' )
_lowerCamelCase : Optional[Any] = regex.compile(__lowerCAmelCase )
_lowerCamelCase : Dict = {v: k for k, v in self.artists_encoder.items()}
_lowerCamelCase : Tuple = {v: k for k, v in self.genres_encoder.items()}
_lowerCamelCase : str = {v: k for k, v in self.lyrics_encoder.items()}
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ):
    """Convert artist names, per-version genre lists and lyrics to vocab ids.

    NOTE(review): the duplicate ``__lowerCAmelCase`` parameter names are a
    SyntaxError, and each ``.get(__lowerCAmelCase , 0)`` should look up the
    comprehension variable (``artist``/``genre``/``character``); likewise
    ``list_artists``/``list_genres``/``list_lyrics`` and the returned names
    were lost to mechanical renaming.  0 is the fallback id for unknowns.
    """
    _lowerCamelCase : Union[str, Any] = [self.artists_encoder.get(__lowerCAmelCase , 0 ) for artist in list_artists]
    for genres in range(len(__lowerCAmelCase ) ):
        # Map each genre to its id, then right-pad with -1 up to self.n_genres.
        _lowerCamelCase : Union[str, Any] = [self.genres_encoder.get(__lowerCAmelCase , 0 ) for genre in list_genres[genres]]
        _lowerCamelCase : List[str] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
    # Only the first lyrics list is tokenized; the other two stay empty.
    _lowerCamelCase : Union[str, Any] = [[self.lyrics_encoder.get(__lowerCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
    return artists_id, list_genres, lyric_ids
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
return list(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self , artist , genre , lyrics , **kwargs ):
    """Normalize then tokenize an (artist, genre, lyrics) triple.

    Bug fix: the parameters previously all shared one duplicate name
    (SyntaxError) and the results of ``prepare_for_tokenization`` / ``_tokenize``
    were bound to throwaway locals while the unbound names ``artist``/``genre``/
    ``lyrics`` were returned (NameError).  Names restored from the identifiers
    the body already referenced.

    Returns the normalized artist, the normalized genre(s), and the lyrics
    split into character tokens.
    """
    artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
    lyrics = self._tokenize(lyrics)
    return artist, genre, lyrics
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : bool = False ):
    """Normalize artists/genres/lyrics before tokenization, per model version.

    NOTE(review): duplicate ``__lowerCAmelCase`` parameters (SyntaxError), and
    results are bound to ``_lowerCamelCase`` while the code reads ``artists``/
    ``genres``/``lyrics``/``vocab`` — mechanical renaming; restore before use.
    """
    for idx in range(len(self.version ) ):
        if self.version[idx] == "v3":
            # v3 keeps raw lowercase names; genres become a one-element list.
            _lowerCamelCase : Dict = artists[idx].lower()
            _lowerCamelCase : List[str] = [genres[idx].lower()]
        else:
            # v2 normalizes names and tags them with a ".v2" suffix.
            _lowerCamelCase : int = self._normalize(artists[idx] ) + '''.v2'''
            _lowerCamelCase : List[str] = [
                self._normalize(__lowerCAmelCase ) + '''.v2''' for genre in genres[idx].split('''_''' )
            ]  # split is for the full dictionary with combined genres
    if self.version[0] == "v2":
        # v2 lyric vocabulary is a fixed 80-char alphabet built inline.
        _lowerCamelCase : Tuple = regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
        _lowerCamelCase : int = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
        _lowerCamelCase : Optional[Any] = {vocab[index]: index + 1 for index in range(len(__lowerCAmelCase ) )}
        _lowerCamelCase : List[Any] = 0
        _lowerCamelCase : Any = len(__lowerCAmelCase ) + 1
        _lowerCamelCase : int = self.vocab
        _lowerCamelCase : Dict = {v: k for k, v in self.vocab.items()}
        _lowerCamelCase : Tuple = ''''''
    else:
        # v3 accepts '+' in lyrics in addition to the v2 character set.
        _lowerCamelCase : int = regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
    _lowerCamelCase : Any = self._run_strip_accents(__lowerCAmelCase )
    _lowerCamelCase : Optional[int] = lyrics.replace('''\\''' , '''\n''' )
    # Drop all out-of-vocabulary characters from the lyrics.
    _lowerCamelCase : Optional[int] = self.out_of_vocab.sub('''''' , __lowerCAmelCase ), [], []
    return artists, genres, lyrics
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = unicodedata.normalize('''NFD''' , __lowerCAmelCase )
_lowerCamelCase : Optional[int] = []
for char in text:
_lowerCamelCase : List[str] = unicodedata.category(__lowerCAmelCase )
if cat == "Mn":
continue
output.append(__lowerCAmelCase )
return "".join(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = (
[chr(__lowerCAmelCase ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
+ [chr(__lowerCAmelCase ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
+ [chr(__lowerCAmelCase ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
+ ['''.''']
)
_lowerCamelCase : List[Any] = frozenset(__lowerCAmelCase )
_lowerCamelCase : Any = re.compile(R'''_+''' )
_lowerCamelCase : Optional[int] = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
_lowerCamelCase : List[str] = pattern.sub('''_''' , __lowerCAmelCase ).strip('''_''' )
return text
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : List[str] ):
"""simple docstring"""
return " ".join(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , inputs , tensor_type=None , prepend_batch_axis=False ):
    """Convert ``inputs`` to tensors of the framework selected by
    ``tensor_type`` (TensorFlow / PyTorch / JAX / NumPy), optionally wrapping
    them in a leading batch axis first.

    Bug fix: duplicate parameter names (SyntaxError) and locals mechanically
    renamed to ``_lowerCamelCase`` while ``tensor_type``/``as_tensor``/
    ``is_tensor``/``inputs`` were read made this method unrunnable;
    identifiers restored from the names the body references.  The bare
    ``except:`` now catches ``Exception`` and chains the original error so the
    root cause is preserved.

    Raises ImportError when the requested framework is not installed, and
    ValueError when the inputs cannot be batched into a tensor.
    """
    if not isinstance(tensor_type , TensorType ):
        tensor_type = TensorType(tensor_type )
    # Get a function reference for the correct framework
    if tensor_type == TensorType.TENSORFLOW:
        if not is_tf_available():
            raise ImportError(
                '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
        import tensorflow as tf
        as_tensor = tf.constant
        is_tensor = tf.is_tensor
    elif tensor_type == TensorType.PYTORCH:
        if not is_torch_available():
            raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
        import torch
        as_tensor = torch.tensor
        is_tensor = torch.is_tensor
    elif tensor_type == TensorType.JAX:
        if not is_flax_available():
            raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
        import jax.numpy as jnp  # noqa: F811
        as_tensor = jnp.array
        is_tensor = _is_jax
    else:
        as_tensor = np.asarray
        is_tensor = _is_numpy
    # Do the tensor conversion in batch
    try:
        if prepend_batch_axis:
            inputs = [inputs]
        if not is_tensor(inputs ):
            inputs = as_tensor(inputs )
    except Exception as err:
        raise ValueError(
            '''Unable to create tensor, you should probably activate truncation and/or padding '''
            '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' ) from err
    return inputs
def __call__( self : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : int="" , __lowerCAmelCase : Union[str, Any]="pt" ):
    """Tokenize one (artist, genres, lyrics) triple for every model version and
    return it as a BatchEncoding of per-version input_ids tensors.

    NOTE(review): duplicate ``__lowerCAmelCase`` parameters (SyntaxError) and
    results bound to ``_lowerCamelCase`` while ``artist``/``genres``/
    ``input_ids``/``full_tokens``/``attention_masks`` etc. are read —
    mechanical renaming; restore before use.
    """
    _lowerCamelCase : Optional[int] = [0, 0, 0]
    # Replicate artist/genres once per model version.
    _lowerCamelCase : Optional[Any] = [artist] * len(self.version )
    _lowerCamelCase : int = [genres] * len(self.version )
    _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = self.tokenize(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self._convert_token_to_id(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Attention mask uses -INFINITY as the masked value.
    _lowerCamelCase : Any = [-INFINITY] * len(full_tokens[-1] )
    _lowerCamelCase : Optional[int] = [
        self.convert_to_tensors(
            [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__lowerCAmelCase )
        for i in range(len(self.version ) )
    ]
    return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
    """Write the artists/genres/lyrics vocabularies as JSON files into the
    save directory and return the three file paths.

    NOTE(review): the duplicate parameter names are a SyntaxError, the join
    results are bound to ``_lowerCamelCase`` while ``artists_file`` etc. are
    returned, and ``ensure_ascii=__lowerCAmelCase`` passes a parameter where a
    boolean (presumably ``False``) was intended — mechanical renaming; verify
    against the upstream tokenizer before use.
    """
    if not os.path.isdir(__lowerCAmelCase ):
        logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
        return
    # artists vocabulary
    _lowerCamelCase : Optional[int] = os.path.join(
        __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
    with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(self.artists_encoder , ensure_ascii=__lowerCAmelCase ) )
    # genres vocabulary
    _lowerCamelCase : Dict = os.path.join(
        __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
    with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(self.genres_encoder , ensure_ascii=__lowerCAmelCase ) )
    # lyrics vocabulary
    _lowerCamelCase : Optional[Any] = os.path.join(
        __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
    with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__lowerCAmelCase ) )
    return (artists_file, genres_file, lyrics_file)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : str = self.artists_decoder.get(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [self.genres_decoder.get(__lowerCAmelCase ) for genre in genres_index]
_lowerCamelCase : Optional[int] = [self.lyrics_decoder.get(__lowerCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
| 83 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = importlib.util.spec_from_file_location(
'''transformers''',
os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
lowerCAmelCase__ = spec.loader.load_module()
lowerCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase__ = re.compile('''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
lowerCAmelCase__ = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def check_config_docstrings_have_checkpoints( ):
    '''Raise ValueError listing every config class whose docstring does not
    contain a checkpoint link of the form ``[name](https://huggingface.co/name)``.

    Bug fix: the function had been mechanically renamed to ``snake_case_`` with
    every local collapsed to ``_lowerCamelCase`` and every call argument to the
    undefined ``A_``, while the body read ``checkpoint_found``/``source``/
    ``configs_without_checkpoint`` etc. (NameError).  Identifiers restored, and
    the function name now matches the existing ``__main__`` call site.
    '''
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name , ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 83 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __snake_case ( _lowercase):
    """Text-summarization tool wrapping a BART CNN/SamSum seq2seq checkpoint.

    NOTE(review): every class attribute below binds the same name
    ``snake_case__`` (mechanical renaming), so only the last assignment
    survives at class-creation time; the originals were presumably
    checkpoint/description/name/pre_processor_class/model_class/inputs/outputs
    — restore before use.
    """
    snake_case__ : int = "philschmid/bart-large-cnn-samsum"
    snake_case__ : List[Any] = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    snake_case__ : int = "summarizer"
    snake_case__ : Dict = AutoTokenizer
    snake_case__ : Union[str, Any] = AutoModelForSeqaSeqLM
    snake_case__ : List[str] = ["text"]
    snake_case__ : Union[str, Any] = ["text"]
    def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : str ):
        """Tokenize the input text into PyTorch tensors.

        NOTE(review): ``truncation=__lowerCAmelCase`` passes the raw text where
        a boolean (presumably True) looks intended — verify upstream.
        """
        return self.pre_processor(__lowerCAmelCase , return_tensors='''pt''' , truncation=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Optional[Any] ):
        """Run generation and return the first output sequence."""
        return self.model.generate(**__lowerCAmelCase )[0]
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Optional[Any] ):
        """Decode generated ids back to text.

        NOTE(review): the decode keyword flags are likewise fed the input
        value instead of booleans — mechanical renaming; verify upstream.
        """
        return self.pre_processor.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
| 83 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class __snake_case ( unittest.TestCase):
    # No fast tests are defined for this pipeline; the real coverage is in the
    # nightly, GPU-gated suite defined next.
    pass
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase):
    """Nightly GPU integration tests for VersatileDiffusionTextToImagePipeline.

    NOTE(review): locals are bound to the placeholder ``_lowerCamelCase`` while
    later lines read ``pipe``/``generator``/``image`` etc., and most call
    arguments were replaced by ``__lowerCAmelCase`` — mechanical renaming;
    restore identifiers before running.
    """
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Save/reload round-trip: 2-step outputs must match before and after
        save_pretrained."""
        _lowerCamelCase : int = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : str = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Dict = torch.manual_seed(0 )
        _lowerCamelCase : List[Any] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__lowerCAmelCase )
            _lowerCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCAmelCase )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        # Re-generate with the same seed and compare the two outputs.
        _lowerCamelCase : int = generator.manual_seed(0 )
        _lowerCamelCase : List[str] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Full 50-step fp16 generation compared against a reference slice."""
        _lowerCamelCase : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        _lowerCamelCase : int = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
        _lowerCamelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        _lowerCamelCase : Dict = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 83 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()
def snake_case_ ( A_ : int, A_ : str, A_ : LevitConfig, A_ : Path, A_ : bool = True ):
    '''Convert one timm LeViT checkpoint to a transformers checkpoint and
    verify the logits match.

    NOTE(review): all five parameters share the duplicate name ``A_``
    (SyntaxError) while the body reads ``hidden_sizes``/``name``/
    ``save_directory``/``push_to_hub``, and locals are bound to
    ``_lowerCamelCase`` while ``from_model``/``our_model``/``weights`` etc.
    are read — mechanical renaming; restore identifiers before running.
    '''
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        # Pick the matching timm architecture for the requested hidden size.
        if hidden_sizes == 1_28:
            if name[-1] == "S":
                _lowerCamelCase : int = timm.create_model('''levit_128s''', pretrained=A_ )
            else:
                _lowerCamelCase : Tuple = timm.create_model('''levit_128''', pretrained=A_ )
        if hidden_sizes == 1_92:
            _lowerCamelCase : List[str] = timm.create_model('''levit_192''', pretrained=A_ )
        if hidden_sizes == 2_56:
            _lowerCamelCase : Union[str, Any] = timm.create_model('''levit_256''', pretrained=A_ )
        if hidden_sizes == 3_84:
            _lowerCamelCase : Union[str, Any] = timm.create_model('''levit_384''', pretrained=A_ )
        from_model.eval()
        _lowerCamelCase : Any = LevitForImageClassificationWithTeacher(A_ ).eval()
        _lowerCamelCase : int = OrderedDict()
        _lowerCamelCase : Any = from_model.state_dict()
        # Copy weights positionally: i-th timm key -> i-th HF key.
        _lowerCamelCase : List[str] = list(from_model.state_dict().keys() )
        _lowerCamelCase : List[str] = list(our_model.state_dict().keys() )
        print(len(A_ ), len(A_ ) )
        for i in range(len(A_ ) ):
            _lowerCamelCase : Union[str, Any] = weights[og_keys[i]]
        our_model.load_state_dict(A_ )
        # Sanity-check both models on the same random input.
        _lowerCamelCase : Optional[int] = torch.randn((2, 3, 2_24, 2_24) )
        _lowerCamelCase : Union[str, Any] = from_model(A_ )
        _lowerCamelCase : Optional[Any] = our_model(A_ ).logits
        assert torch.allclose(A_, A_ ), "The model logits don't match the original one."
    _lowerCamelCase : int = name
    print(A_ )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        _lowerCamelCase : int = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F'''Pushed {checkpoint_name}''' )
def snake_case_ ( A_ : Path, A_ : str = None, A_ : bool = True ):
    '''Build the per-model LeViT configs and dispatch conversion for one model
    (when a name is given) or for all of them.

    NOTE(review): duplicate ``A_`` parameter names (SyntaxError) while the body
    reads ``model_name``/``save_directory``/``push_to_hub``; locals are bound
    to ``_lowerCamelCase`` while ``num_labels``/``idalabel``/
    ``names_to_hidden_sizes``/``names_to_config`` are read; and this definition
    shadows the previous function, which was also renamed to ``snake_case_``.
    Restore identifiers before running.
    '''
    _lowerCamelCase : Dict = '''imagenet-1k-id2label.json'''
    _lowerCamelCase : Dict = 10_00
    _lowerCamelCase : Union[str, Any] = (1, num_labels)
    _lowerCamelCase : Tuple = '''huggingface/label-files'''
    _lowerCamelCase : Any = num_labels
    # Load the ImageNet id->label mapping from the Hub dataset repo.
    _lowerCamelCase : List[Any] = json.load(open(hf_hub_download(A_, A_, repo_type='''dataset''' ), '''r''' ) )
    _lowerCamelCase : List[str] = {int(A_ ): v for k, v in idalabel.items()}
    _lowerCamelCase : Optional[Any] = idalabel
    _lowerCamelCase : Tuple = {v: k for k, v in idalabel.items()}
    # Config factory pre-filled with the label maps.
    _lowerCamelCase : int = partial(A_, num_labels=A_, idalabel=A_, labelaid=A_ )
    _lowerCamelCase : Optional[int] = {
        '''levit-128S''': 1_28,
        '''levit-128''': 1_28,
        '''levit-192''': 1_92,
        '''levit-256''': 2_56,
        '''levit-384''': 3_84,
    }
    _lowerCamelCase : Any = {
        '''levit-128S''': ImageNetPreTrainedConfig(
            hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        '''levit-128''': ImageNetPreTrainedConfig(
            hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        '''levit-192''': ImageNetPreTrainedConfig(
            hidden_sizes=[1_92, 2_88, 3_84], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        '''levit-256''': ImageNetPreTrainedConfig(
            hidden_sizes=[2_56, 3_84, 5_12], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        '''levit-384''': ImageNetPreTrainedConfig(
            hidden_sizes=[3_84, 5_12, 7_68], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], A_, names_to_config[model_name], A_, A_ )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], A_, A_, A_, A_ )
    return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 83 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowerCAmelCase__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ = ort.RunOptions()
lowerCAmelCase__ = 128
lowerCAmelCase__ = 1
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = 2000
lowerCAmelCase__ = {}
for iter in range(max_iters):
lowerCAmelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 | 1 |
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
# Deterministic model init/data across the FSDP tests below.
set_seed(42)
# NOTE(review): the four constants below all bind the same placeholder name
# ``lowerCAmelCase__`` (and the last references the undefined ``FPaa``/``BFaa``)
# while the tests later read ``BERT_BASE_CASED``-style names and ``dtypes`` —
# mechanical renaming; restore the original names before running.
lowerCAmelCase__ = '''bert-base-cased'''
lowerCAmelCase__ = '''fp16'''
lowerCAmelCase__ = '''bf16'''
lowerCAmelCase__ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class __snake_case ( _lowercase):
    """Unit tests for env-var-driven FullyShardedDataParallelPlugin config.

    NOTE(review): throughout this class, results are bound to the placeholder
    ``_lowerCamelCase`` while later lines read ``self.dist_env``/``env``/
    ``fsdp_plugin``/``model``/``accelerator`` etc., and many call arguments
    were replaced by ``__lowerCAmelCase`` — mechanical renaming; restore
    identifiers before running.
    """
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Build the single-process distributed env used by every test."""
        super().setUp()
        _lowerCamelCase : List[Any] = dict(
            ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Each sharding-strategy env name must map to ShardingStrategy(i+1)."""
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(__lowerCAmelCase ):
            _lowerCamelCase : str = self.dist_env.copy()
            _lowerCamelCase : Optional[Any] = f'''{i + 1}'''
            _lowerCamelCase : List[str] = strategy
            with mockenv_context(**__lowerCAmelCase ):
                _lowerCamelCase : List[Any] = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Backward-prefetch env values map to BackwardPrefetch (or None)."""
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(__lowerCAmelCase ):
            _lowerCamelCase : Optional[Any] = self.dist_env.copy()
            _lowerCamelCase : Any = prefetch_policy
            with mockenv_context(**__lowerCAmelCase ):
                _lowerCamelCase : List[str] = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch )
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """State-dict-type env values map to StateDictType; FULL_STATE_DICT also
        enables CPU offload and rank0-only saving."""
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(__lowerCAmelCase ):
            _lowerCamelCase : Tuple = self.dist_env.copy()
            _lowerCamelCase : str = state_dict_type
            with mockenv_context(**__lowerCAmelCase ):
                _lowerCamelCase : Optional[Any] = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Auto-wrap policy selection, including the error path for a missing
        transformer layer class and the no-op path for min_num_params=0."""
        _lowerCamelCase : int = AutoModel.from_pretrained(__lowerCAmelCase )
        for policy in FSDP_AUTO_WRAP_POLICY:
            _lowerCamelCase : Optional[Any] = self.dist_env.copy()
            _lowerCamelCase : str = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                _lowerCamelCase : str = '''BertLayer'''
            elif policy == "SIZE_BASED_WRAP":
                _lowerCamelCase : Union[str, Any] = '''2000'''
            with mockenv_context(**__lowerCAmelCase ):
                _lowerCamelCase : Union[str, Any] = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(__lowerCAmelCase )
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy )
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
        # Wrong transformer layer class -> must raise with a clear message.
        _lowerCamelCase : Optional[int] = self.dist_env.copy()
        _lowerCamelCase : Union[str, Any] = '''TRANSFORMER_BASED_WRAP'''
        _lowerCamelCase : Tuple = '''T5Layer'''
        with mockenv_context(**__lowerCAmelCase ):
            _lowerCamelCase : List[str] = FullyShardedDataParallelPlugin()
            with self.assertRaises(__lowerCAmelCase ) as cm:
                fsdp_plugin.set_auto_wrap_policy(__lowerCAmelCase )
            self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
        # min_num_params=0 -> no auto-wrap policy is installed.
        _lowerCamelCase : List[Any] = self.dist_env.copy()
        _lowerCamelCase : List[str] = '''SIZE_BASED_WRAP'''
        _lowerCamelCase : Optional[Any] = '''0'''
        with mockenv_context(**__lowerCAmelCase ):
            _lowerCamelCase : int = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(__lowerCAmelCase )
            self.assertIsNone(fsdp_plugin.auto_wrap_policy )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Mixed-precision env values build the matching MixedPrecision policy
        and the right grad scaler (ShardedGradScaler for fp16, none for bf16)."""
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            _lowerCamelCase : Optional[Any] = self.dist_env.copy()
            _lowerCamelCase : Dict = mp_dtype
            with mockenv_context(**__lowerCAmelCase ):
                _lowerCamelCase : Union[str, Any] = Accelerator()
                if mp_dtype == "fp16":
                    _lowerCamelCase : List[str] = torch.floataa
                elif mp_dtype == "bf16":
                    _lowerCamelCase : Optional[int] = torch.bfloataa
                _lowerCamelCase : Optional[Any] = MixedPrecision(param_dtype=__lowerCAmelCase , reduce_dtype=__lowerCAmelCase , buffer_dtype=__lowerCAmelCase )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __lowerCAmelCase )
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler , __lowerCAmelCase ) )
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler )
                AcceleratorState._reset_state(__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : int ):
        """The fsdp_offload_params env flag maps to CPUOffload(offload_params=flag)."""
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            _lowerCamelCase : Union[str, Any] = self.dist_env.copy()
            _lowerCamelCase : Union[str, Any] = str(__lowerCAmelCase ).lower()
            with mockenv_context(**__lowerCAmelCase ):
                _lowerCamelCase : Optional[Any] = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__lowerCAmelCase ) )
@require_fsdp
@require_multi_gpu
@slow
class __snake_case ( _lowercase):
    """Slow multi-GPU FSDP integration tests launched via `accelerate launch`.

    NOTE(review): results are bound to the placeholder ``_lowerCamelCase``
    while later lines read ``self.performance_configs``/``cmd``/``cmd_config``
    etc., and many call arguments were replaced by ``__lowerCAmelCase`` —
    mechanical renaming; restore identifiers before running.
    """
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Record thresholds, configs and the external test-script location."""
        super().setUp()
        _lowerCamelCase : int = 0.82
        _lowerCamelCase : str = [
            '''fsdp_shard_grad_op_transformer_based_wrap''',
            '''fsdp_full_shard_transformer_based_wrap''',
        ]
        _lowerCamelCase : Union[str, Any] = {
            '''multi_gpu_fp16''': 3_2_0_0,
            '''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2_0_0_0,
            '''fsdp_full_shard_transformer_based_wrap_fp16''': 1_9_0_0,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        _lowerCamelCase : Optional[int] = 1_6_0
        _lowerCamelCase : List[Any] = 1_6_0
        _lowerCamelCase : int = inspect.getfile(accelerate.test_utils )
        _lowerCamelCase : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Run test_performance.py for every sharding/precision config."""
        _lowerCamelCase : List[Any] = os.path.join(self.test_scripts_folder , '''test_performance.py''' )
        _lowerCamelCase : Tuple = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
        for config in self.performance_configs:
            _lowerCamelCase : Dict = cmd.copy()
            for i, strategy in enumerate(__lowerCAmelCase ):
                if strategy.lower() in config:
                    cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
                    break
            if "fp32" in config:
                cmd_config.append('''--mixed_precision=no''' )
            else:
                cmd_config.append('''--mixed_precision=fp16''' )
            if "cpu_offload" in config:
                cmd_config.append('''--fsdp_offload_params=True''' )
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append('''--fsdp_min_num_params=2000''' )
            cmd_config.extend(
                [
                    self.test_file_path,
                    f'''--output_dir={self.tmpdir}''',
                    f'''--performance_lower_bound={self.performance_lower_bound}''',
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Run test_checkpointing.py for FULL_SHARD across all state-dict types,
        then resume training from the saved epoch_0 checkpoint."""
        _lowerCamelCase : Optional[int] = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
        _lowerCamelCase : List[str] = [
            '''accelerate''',
            '''launch''',
            '''--num_processes=2''',
            '''--num_machines=1''',
            '''--machine_rank=0''',
            '''--use_fsdp''',
            '''--mixed_precision=fp16''',
            '''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
        ]
        for i, strategy in enumerate(__lowerCAmelCase ):
            _lowerCamelCase : Union[str, Any] = cmd.copy()
            cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
            if strategy != "FULL_SHARD":
                continue
            _lowerCamelCase : int = len(__lowerCAmelCase )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                _lowerCamelCase : int = cmd_config[:state_dict_config_index]
                cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f'''--output_dir={self.tmpdir}''',
                        '''--partial_train_epoch=1''',
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
                # Second launch: resume from the checkpoint written above.
                _lowerCamelCase : List[Any] = cmd_config[:-1]
                _lowerCamelCase : List[Any] = os.path.join(self.tmpdir , '''epoch_0''' )
                cmd_config.extend(
                    [
                        f'''--resume_from_checkpoint={resume_from_checkpoint}''',
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Run test_peak_memory_usage.py for every tracked memory config."""
        _lowerCamelCase : str = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
        _lowerCamelCase : Optional[Any] = [
            '''accelerate''',
            '''launch''',
            '''--num_processes=2''',
            '''--num_machines=1''',
            '''--machine_rank=0''',
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            _lowerCamelCase : Optional[int] = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(['''--mixed_precision=fp16'''] )
            else:
                cmd_config.extend(['''--mixed_precision=no'''] )
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(['''--use_fsdp'''] )
                for i, strategy in enumerate(__lowerCAmelCase ):
                    if strategy.lower() in spec:
                        cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
                        break
                if "cpu_offload" in spec:
                    cmd_config.append('''--fsdp_offload_params=True''' )
                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
                        break
                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append('''--fsdp_min_num_params=2000''' )
            cmd_config.extend(
                [
                    self.test_file_path,
                    f'''--output_dir={self.tmpdir}''',
                    f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
                    f'''--n_train={self.n_train}''',
                    f'''--n_val={self.n_val}''',
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
| 83 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(figure_x: float, figure_y: float, max_step: int) -> float:
    """Return the normalized Mandelbrot escape step (0..1) for point (figure_x, figure_y).

    0 means immediate divergence, 1 means the point never diverged within
    max_step iterations (i.e. it is likely inside the set).

    Fixes: the original had three parameters all named ``A_`` (a SyntaxError)
    and the body read undefined names ``x``/``y``; the name ``get_distance``
    is what the call sites in this file use.
    """
    a = figure_x
    b = figure_y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + figure_x
        b = 2 * a * b + figure_y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 2, i.e. |z|^2 > 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set (distance == 1), white otherwise.

    Fixes: the parameter was named ``A_`` while the body read undefined
    ``distance``; the name ``get_black_and_white_rgb`` matches the call site.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Black for points inside the set, otherwise an HSV rainbow keyed on distance.

    Fixes: parameter/name mismatch (``A_`` vs undefined ``distance``); the
    name ``get_color_coded_rgb`` matches the call site.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        # hue = distance, full saturation and value, scaled to 8-bit channels
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set into a new PIL RGB image and return it.

    Fixes: all seven parameters were named ``A_`` (SyntaxError), the pixel
    buffer was never written (results went into throwaway locals), and the
    name ``get_image`` is what the `__main__` block calls.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # figure_height depends only on constants — hoisted out of the pixel loops
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # colored version, full figure — must be bound to ``img``, which is what
    # ``img.show()`` below reads (the original bound a throwaway name).
    img = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
| 83 | 1 |
"""simple docstring"""
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return a key of
    ``key_len`` bits as a string of '0'/'1'.

    Fixes: parameters were unusable (body read undefined names everywhere);
    the name ``bbaa`` is what the `__main__` block calls.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
    # Demo run: a fixed simulator seed makes the printed key reproducible.
    print(F"""The generated key is : {bbaa(8, seed=0)}""")
    from doctest import testmod

    testmod()
| 83 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line, padding/truncating to ``max_length``.

    BART wants ``add_prefix_space=True`` for lines that do not already start
    with a space. Fixes: the original had duplicate ``A_`` parameters and lost
    the ``tokenizer.padding_side`` assignment; the name ``encode_line`` is
    what ``__getitem__`` in the dataset class calls.
    """
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop columns that are pure padding in every row of ``input_ids``.

    Returns the trimmed ids, or a ``(ids, attention_mask)`` tuple when a mask
    is supplied. Fixes duplicate ``A_`` parameters; the name ``trim_batch`` is
    what the collate function in this file calls.
    """
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case ( _lowercase):
    """Line-by-line seq2seq dataset: pairs line i of ``<type_path>.source`` with
    line i of ``<type_path>.target`` under ``data_dir``, tokenizing lazily via
    linecache.

    Fixes: ``__init__`` had every parameter named ``__lowerCAmelCase``
    (SyntaxError) and dropped all values into throwaway locals although
    ``self.*`` is read everywhere; the static char-length helper must be named
    ``get_char_lens`` because ``__init__`` calls ``self.get_char_lens``.
    """

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        """Return {input_ids, attention_mask, decoder_input_ids} for one line pair."""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right; RAG wraps two sub-tokenizers.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line in *data_file* (detects empties, caps n_obs)."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def SCREAMING_SNAKE_CASE(self, batch):
        """Collate a list of examples: stack tensors and trim shared padding columns.

        NOTE(review): original upstream name is likely ``collate_fn`` — kept the
        obfuscated name to avoid breaking external callers; confirm and rename.
        """
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
# Module logger — later code in this file reads it as ``logger``, so it must
# be bound to that name rather than a throwaway.
logger = getLogger(__name__)
def snake_case_ ( A_ : List[List] ):
    """Flatten one level of nesting: concatenate the sub-lists of *A_*."""
    flattened = []
    for sublist in A_:
        flattened.extend(sublist)
    return flattened
def snake_case_ ( A_ : str ):
    """Record the current git state (repo id, sha, branch, hostname) as
    ``git_log.json`` inside directory *A_*."""
    git_log_path = os.path.join(A_, "git_log.json")
    save_json(get_git_info(), git_log_path)
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Serialize *content* as indented JSON to *path*.

    Fixes duplicate ``A_`` parameters; the name ``save_json`` is what the
    git-info saver in this file calls.
    """
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def snake_case_ ( A_ : Any ):
    """Deserialize and return the JSON file at path *A_*.

    Bug fix: the original passed the *path string* to ``json.load`` instead of
    the open file handle, which raises ``AttributeError``.
    """
    with open(A_ ) as f:
        return json.load(f)
def get_git_info():
    """Collect repo id, commit sha, active branch and hostname for run logging.

    Fixes: the zero-argument function read an undefined ``A_`` —
    ``search_parent_directories`` must simply be True so the repo is found
    from any working subdirectory; the name ``get_git_info`` is what the
    git-info saver in this file calls.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def snake_case_ ( fn: Callable, iterable: Iterable ):
    """Eager map: apply *fn* to every element of *iterable* and return a list.

    Fixes duplicate ``A_`` parameter names (a SyntaxError).
    """
    return list(map(fn, iterable))
def snake_case_ ( obj, path ):
    """Pickle *obj* to file *path*.

    Fixes duplicate ``A_`` parameters and ``pickle.dump`` being handed the
    same undefined name twice instead of (object, file handle).
    """
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lowercase, strip punctuation, drop articles and collapse whitespace
    (SQuAD-style answer normalization).

    Fixes: three of the four nested helpers read an undefined ``text``; the
    name ``normalize_answer`` is what the EM/F1 helpers in this file call.
    """

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def snake_case_ ( prediction, ground_truth ):
    """Token-level F1 between the normalized prediction and ground truth.

    Fixes duplicate ``A_`` parameters (a SyntaxError).
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction, ground_truth):
    """True iff the normalized prediction equals the normalized ground truth.

    Fixes duplicate ``A_`` parameters; the name ``exact_match_score`` is what
    the corpus-EM helper in this file calls.
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def snake_case_ ( output_lns: List[str], reference_lns: List[str] ):
    """Corpus-level exact match between two parallel lists of lines.

    Fixes duplicate ``A_`` parameters and the undefined names handed to
    ``exact_match_score`` (should be the zipped pair).
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def snake_case_ ( model_prefix ):
    """True if *model_prefix* names a RAG model (i.e. starts with 'rag').

    Fixes: the parameter was named ``A_`` while the body read the undefined
    name ``model_prefix``.
    """
    return model_prefix.startswith("rag")
def snake_case_ ( extra_params, hparams, config ):
    """Move each attribute named in *extra_params* from *hparams* onto *config*,
    removing it from *hparams*; returns the updated (hparams, config) pair.

    Fixes duplicate ``A_`` parameters and the lost ``equivalent_param`` /
    ``setattr`` targets.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            # prefer the exact name if the config has it, else the equivalent
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 83 | 1 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# UNet key-renaming tables; every tuple is (stable-diffusion name, HF Diffusers name).
# Fixes: all prefixes were bound to the throwaway ``lowerCAmelCase__`` while the
# ``append`` calls (and the converters below) read the real names.
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet keys to original stable-diffusion keys.

    Fixes: the ``mapping[...]`` writes were lost (assigned to throwaway
    locals); the name ``convert_unet_state_dict`` is what ``__main__`` calls.
    """
    # start with an identity mapping, then rewrite HF names to SD names
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
# Fixes: prefixes were bound to ``lowerCAmelCase__`` while the ``append`` calls
# and the converter below read the real names.
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    """Append two trailing size-1 dims (Linear attention weight -> SD 1x1 conv weight).

    Fixes: the parameter was named ``A_`` while the body read undefined ``w``;
    the name ``reshape_weight_for_sd`` is what the VAE converter calls.
    """
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys to original stable-diffusion keys, reshaping
    the mid-block attention q/k/v/proj_out weights to conv form.

    Fixes: the ``mapping``/``new_state_dict`` writes were lost; the name
    ``convert_vae_state_dict`` is what ``__main__`` calls.
    """
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
lowerCAmelCase__ = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ = {'''q''': 0, '''k''': 1, '''v''': 2}
def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert a v2 (OpenCLIP-style) HF text-encoder state dict to SD format,
    fusing the separate q/k/v projections into ``in_proj_weight``/``in_proj_bias``.

    Fixes: lambda parameter mismatch (``A_`` vs body ``m``), the lost
    ``capture_qkv_*[k_pre][code2idx[k_code]]`` writes, and the lost
    ``new_state_dict`` writes; the name matches the ``__main__`` call.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # single char: 'q', 'k' or 'v'
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    """v1 CLIP text encoders need no key renaming: pass the dict through.

    Fixes: the parameter was named ``A_`` while the body returned the
    undefined name ``text_enc_dict``; the name matches the ``__main__`` call.
    """
    return text_enc_dict
if __name__ == "__main__":
    # CLI driver: convert a Diffusers checkpoint directory into a single
    # original-stable-diffusion checkpoint. Fixes: every binding was collapsed
    # onto ``lowerCAmelCase__`` while ``args``/paths/state dicts are read below.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )
    args = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 83 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-wide logger for this configuration file.
lowerCAmelCase__ = logging.get_logger(__name__)

# Shortcut-name -> hosted config-URL map (pretrained config archive map).
# NOTE(review): this rebinds ``lowerCAmelCase__`` and thereby discards the
# logger created above — these were presumably meant to be two distinct
# names; confirm against the rest of the module before renaming.
lowerCAmelCase__ = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class __snake_case ( _lowercase):
    """CamemBERT model configuration (RoBERTa-style hyperparameters).

    Fixes: ``__init__`` had every parameter named ``__lowerCAmelCase``
    (a SyntaxError, and unusable by keyword) and dropped the values into
    throwaway locals instead of instance attributes. Parameter names and
    order follow the positions of the original defaults.
    """

    snake_case__ : Optional[Any] = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( _lowercase):
    """ONNX export configuration: declares the dynamic input axes.

    Fixes: the axis dict was assigned to a throwaway local although the
    OrderedDict below reads ``dynamic_axis``.
    """

    @property
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Dynamic-axis spec for input_ids / attention_mask; multiple-choice
        tasks carry an extra `choice` axis.

        NOTE(review): the upstream property is likely named ``inputs`` —
        kept the obfuscated name to avoid breaking callers; confirm.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
    """Builds small RegNet configs/inputs and runs shape checks for the test suite.

    Fixes: ``__init__`` had every parameter named ``__lowerCAmelCase``
    (SyntaxError) and dropped values into throwaway locals although the
    methods read ``self.*``; method names restored from their call sites
    (``self.get_char_lens``-style internal calls and the mixin class below).
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels when use_labels) plus a fresh config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    """Common model tests for RegNet (ModelTester/ConfigTester harness).

    NOTE(review): the obfuscation replaced most assignment targets with the
    throwaway name ``_lowerCamelCase`` and several references with
    ``__lowerCAmelCase``; names read later (``model``, ``config``,
    ``inputs_dict`` …) no longer have matching assignments — confirm every
    method against the original transformers RegNet test suite.
    """
    # Model classes / pipeline mapping, gated on torch availability.
    snake_case__ : Any = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    snake_case__ : int = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags for the common test mixin (all disabled).
    snake_case__ : Dict = False
    snake_case__ : List[Any] = False
    snake_case__ : Optional[int] = False
    snake_case__ : List[str] = False
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """setUp: create the model tester and the config tester."""
        _lowerCamelCase : Tuple = RegNetModelTester(self )
        _lowerCamelCase : List[str] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Run the standard ConfigTester battery."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Intentionally a no-op (config common properties need no extra check)."""
        return
    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Skipped: RegNet is a conv net and consumes pixel values only."""
        pass
    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Skipped: there are no token embeddings to get/set."""
        pass
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Assert the forward signature starts with ``pixel_values``."""
        _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
            _lowerCamelCase : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : Optional[int] = [*signature.parameters.keys()]
            _lowerCamelCase : str = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Forward-pass shape test, delegated to the model tester."""
        _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Check that norm-layer weights/biases are initialized to 1/0."""
        _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : List[Any] = model_class(config=__lowerCAmelCase )
            for name, module in model.named_modules():
                # NOTE(review): ``nn.BatchNormad`` looks like a mangled
                # ``nn.BatchNorm2d`` — confirm.
                if isinstance(__lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Check hidden-state outputs: count is num_stages + 1 and the first
        feature map is at half resolution."""
        def check_hidden_states_output(__lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
            _lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                _lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
            _lowerCamelCase : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _lowerCamelCase : Any = self.model_tester.num_stages
            self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : Dict = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                # NOTE(review): these two assignments presumably targeted
                # ``config.layer_type`` and ``inputs_dict["output_hidden_states"]``
                # before obfuscation — confirm.
                _lowerCamelCase : Tuple = layer_type
                _lowerCamelCase : Tuple = True
                check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                _lowerCamelCase : List[Any] = True
                check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Image-classification head test, delegated to the model tester."""
        _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
    @slow
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Smoke-test ``from_pretrained`` on the first hub checkpoint."""
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : int = RegNetModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def snake_case_ ( ):
    '''Load and return the COCO fixture image used by the slow integration tests.

    Bug fix: the original assigned the opened image to a throwaway local and
    then returned the undefined name ``image``, raising NameError at runtime.
    '''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase):
    """Slow integration test: classify the COCO fixture image with the first
    pretrained RegNet checkpoint and compare the logits to reference values.

    NOTE(review): assignment targets (``model``, ``image_processor``,
    ``image``, ``inputs``, ``outputs``, ``expected_shape``,
    ``expected_slice``) were lost to the obfuscation — confirm against the
    original transformers test file.
    """
    @cached_property
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Default image processor for the checkpoint (None without vision)."""
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Forward the fixture image and verify logits shape and first values."""
        _lowerCamelCase : Optional[Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCAmelCase )
        _lowerCamelCase : str = self.default_image_processor
        _lowerCamelCase : int = prepare_img()
        _lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            _lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase )
        # verify the logits
        _lowerCamelCase : Optional[Any] = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
        _lowerCamelCase : Optional[int] = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 83 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# NOTE(review): the four constants below all share one obfuscated name, so
# each rebinding shadows the previous one.  Judging from their uses in the
# converter class further down, they were originally HIGHLIGHT_MESSAGE_PRE,
# HIGHLIGHT_MESSAGE_POST, TO_HIGHLIGHT and TO_CONVERT — confirm upstream.
# Marker prepended to converted lines that likely need manual attention.
lowerCAmelCase__ = '''<<<<<<< This should probably be modified because it mentions: '''
# Closing marker appended after a highlighted line.
lowerCAmelCase__ = '''=======
>>>>>>>
'''
# TFDS identifiers whose presence flags a line for manual review.
lowerCAmelCase__ = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]
# (regex pattern, replacement) pairs applied in order to port TFDS code to
# the `datasets` API.
lowerCAmelCase__ = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def snake_case_ ( args : Namespace ):
    '''Factory used as the argparse ``func`` default: build a ConvertCommand
    from the parsed CLI arguments.

    Bug fix: the parameter was named ``A_`` while the body read ``args``,
    which raised NameError; the parameter is renamed to match the body
    (callers invoke it positionally via ``args.func(args)``).
    '''
    return ConvertCommand(args.tfds_path, args.datasets_directory )
class __snake_case ( _lowercase):
    """``datasets-cli convert``: port TFDS dataset scripts to the HuggingFace
    ``datasets`` format.

    NOTE(review): the obfuscation replaced assignment targets with the
    throwaway name ``_lowerCamelCase``, so names read later (``train_parser``,
    ``abs_tfds_path``, ``utils_files``, ``out_lines`` …) no longer have
    matching assignments — confirm against datasets/commands/convert.py.
    """
    @staticmethod
    def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : ArgumentParser ):
        """Register the ``convert`` subcommand and its two path arguments."""
        _lowerCamelCase : List[str] = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=__lowerCAmelCase )
    def __init__( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , *__lowerCAmelCase : int ):
        """Store the logger and the source (tfds) / destination directories."""
        _lowerCamelCase : List[str] = get_logger('''datasets-cli/converting''' )
        _lowerCamelCase : int = tfds_path
        _lowerCamelCase : Dict = datasets_directory
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Convert each TFDS script under ``self._tfds_path``: rewrite imports
        and API calls line by line, write builder scripts into per-dataset
        directories, then copy shared utility files next to the builders that
        import them."""
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : Union[str, Any] = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            _lowerCamelCase : Dict = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        _lowerCamelCase : int = os.path.abspath(self._datasets_directory )
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        _lowerCamelCase : str = []
        _lowerCamelCase : Union[str, Any] = []
        _lowerCamelCase : Union[str, Any] = {}
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : List[str] = os.listdir(__lowerCAmelCase )
        else:
            _lowerCamelCase : Optional[Any] = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''' )
            _lowerCamelCase : Union[str, Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Optional[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            if not os.path.isfile(__lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(__lowerCAmelCase , encoding='''utf-8''' ) as f:
                _lowerCamelCase : Tuple = f.readlines()
            _lowerCamelCase : Optional[int] = []
            _lowerCamelCase : Union[str, Any] = False
            _lowerCamelCase : int = False
            _lowerCamelCase : Tuple = []
            for line in lines:
                _lowerCamelCase : Optional[int] = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    _lowerCamelCase : Union[str, Any] = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    _lowerCamelCase : List[str] = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    _lowerCamelCase : str = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    _lowerCamelCase : Union[str, Any] = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # NOTE(review): the lambda parameter name and the filtered
                    # iterable were mangled (``e`` is undefined) — confirm.
                    _lowerCamelCase : Dict = True
                    _lowerCamelCase : Optional[int] = list(filter(lambda __lowerCAmelCase : e in out_line , __lowerCAmelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCAmelCase ) + '''\n''' )
                    out_lines.append(__lowerCAmelCase )
                    out_lines.append(__lowerCAmelCase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        _lowerCamelCase : str = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        _lowerCamelCase : Dict = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , __lowerCAmelCase )
                        tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                        _lowerCamelCase : Union[str, Any] = '''from . import ''' + match.group(1 )
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    _lowerCamelCase : Any = True
                out_lines.append(__lowerCAmelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                _lowerCamelCase : Union[str, Any] = f_name.replace('''.py''' , '''''' )
                _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
                self._logger.info(f'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(__lowerCAmelCase )
            if needs_manual_update:
                with_manual_update.append(__lowerCAmelCase )
            with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(__lowerCAmelCase )
            self._logger.info(f'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                _lowerCamelCase : Optional[int] = os.path.basename(__lowerCAmelCase )
                _lowerCamelCase : Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(__lowerCAmelCase , __lowerCAmelCase )
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 83 | 1 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def snake_case_ ( x : float, y : float, max_step : int ):
    '''Return the normalized escape step of the point (x, y) under the
    Mandelbrot iteration z -> z**2 + c with c = x + iy.

    The iteration is performed on the real/imaginary parts a, b.  The result
    is ``step / (max_step - 1)``: 1.0 means the point never diverged within
    ``max_step`` iterations (it is in the set), values < 1 measure how
    quickly |z| exceeded 2.

    Bug fix: the original declared three parameters all named ``A_`` (a
    SyntaxError) while the body read x, y and max_step, and the loop-carried
    values a, b, a_new had lost their assignment targets.

    NOTE: ``max_step`` must be >= 2; with max_step < 1 the loop body never
    runs and ``step`` is unbound (unchanged from the original behavior).
    '''
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4 (i.e. |z| > 2)
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def snake_case_ ( distance : float ):
    '''Black-and-white coloring: points inside the Mandelbrot set
    (``distance == 1``) are black, all others white.

    Bug fix: the parameter was named ``A_`` while the body read ``distance``
    (NameError); the parameter is renamed to match the body.
    '''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def snake_case_ ( distance : float ):
    '''Rainbow coloring: points inside the Mandelbrot set (``distance == 1``)
    are black; others are mapped via HSV with the hue set to ``distance``.

    Bug fix: the parameter was named ``A_`` while the body read ``distance``
    in the comparison (NameError); the parameter is renamed and used
    consistently in both the comparison and the HSV conversion.
    '''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(distance, 1, 1 ) )
def snake_case_ ( image_width : int = 800, image_height : int = 600, figure_center_x : float = -0.6, figure_center_y : float = 0, figure_width : float = 3.2, max_step : int = 50, use_distance_color_coding : bool = True, ):
    '''Render the Mandelbrot set into a new RGB ``PIL.Image``.

    The figure is the rectangle of width ``figure_width`` (height derived
    from the image aspect ratio) centered on (figure_center_x,
    figure_center_y); each pixel is colored from its escape distance, either
    with the rainbow coloring (``use_distance_color_coding=True``) or
    black-and-white.

    Bug fix: the original declared seven parameters all named ``A_`` (a
    SyntaxError) and the per-pixel assignment targets (figure_height,
    figure_x, figure_y, distance, pixels[...]) were lost; both are restored.

    NOTE(review): the helper names ``get_distance``, ``get_color_coded_rgb``
    and ``get_black_and_white_rgb`` were kept from the original code, but in
    this obfuscated module those helpers were renamed — confirm the bindings.
    '''
    img = Image.new('''RGB''', (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # colored version, full figure
    # NOTE(review): the assignment target was mangled — ``get_image()`` and
    # the ``img`` read below presumably were ``img = get_image()``; in this
    # obfuscated module both names are undefined as written.  Confirm.
    lowerCAmelCase__ = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
| 83 |
"""simple docstring"""
def snake_case_ ( weights : list, values : list, number_of_items : int, max_weight : int, index : int ):
    '''Recursive 0/1 knapsack: return the maximum total value obtainable from
    ``weights``/``values`` items ``index .. number_of_items - 1`` within the
    remaining capacity ``max_weight``.

    Bug fixes: the original declared duplicate ``A_`` parameters (a
    SyntaxError) while the body read weights/values/max_weight/index, and the
    recursive calls referenced the undefined name ``knapsack``; both are
    restored (the recursion now calls this function itself).
    '''
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0
    # Best value when the current item is skipped.
    ans_without = snake_case_(weights, values, number_of_items, max_weight, index + 1 )
    # Best value when the current item is taken (only if it fits).
    ans_with = 0
    if weights[index] <= max_weight:
        ans_with = values[index] + snake_case_(
            weights, values, number_of_items, max_weight - weights[index], index + 1 )
    return max(ans_without, ans_with )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 83 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def snake_case_ ( unknown_args : list ):
    '''Pair up unparsed CLI tokens as ``{flag-without-dashes: value}``.

    Tokens are taken in (flag, value) pairs: even positions are flags (their
    leading dashes stripped), odd positions are the corresponding values.

    Bug fix: the parameter was named ``A_`` (and mis-annotated ``int``) while
    the body read ``unknown_args`` (NameError); the parameter is renamed to
    match the body and annotated as a list of tokens.
    '''
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2], unknown_args[1::2] )}
def snake_case_ ( ):
    '''Entry point of the ``datasets-cli`` tool: build the argument parser,
    register all subcommands, parse the command line and run the selected
    command service.

    Bug fix: the original lost every assignment target (``parser``, ``args``
    etc.) and passed the undefined name ``A_`` around; the targets are
    restored.

    NOTE(review): the calls to ``parse_unknown_args`` and the command
    ``register_subcommand`` hooks are kept as in the original; in this
    obfuscated module some of those names were renamed — confirm bindings.
    '''
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''', usage='''datasets-cli <command> [<args>]''', allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args, **kwargs )
    service.run()
# NOTE(review): ``main`` is undefined in this obfuscated module — the entry
# point above was renamed to ``snake_case_``; confirm the intended binding.
if __name__ == "__main__":
    main()
| 83 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_lowercase):
    """Placeholder for an object that needs the transformers/torch/note_seq
    backends: any attempt to instantiate it or to call its classmethods
    reports the missing backends instead of running."""

    # Backends that must be installed for the real implementation.
    snake_case__ : Optional[Any] = ["transformers", "torch", "note_seq"]

    def __init__( self : Union[str, Any] , *args , **kwargs ):
        """Instantiating the placeholder reports the missing backends."""
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : List[Any] , *args , **kwargs ):
        """Classmethod stub: reports the missing backends when called."""
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : int , *args , **kwargs ):
        """Classmethod stub (shadows the previous definition of the same
        name): reports the missing backends when called."""
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 83 | 1 |
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
# Sentinel for failed/missing metric values throughout the benchmark script.
lowerCAmelCase__ = float('''nan''')
class __snake_case :
    """A minimal ``tee``: forwards writes to the original stdout while also
    appending them (stripped of tqdm carriage-return updates) to a log file.

    Bug fix: ``__init__`` originally stored ``sys.stdout`` and the opened
    file in throwaway locals, so the ``self.stdout`` / ``self.file``
    attributes read by the other methods were never set and every use raised
    AttributeError.
    """

    def __init__( self : int , filename : str ):
        """Remember the real stdout and open ``filename`` for appending."""
        self.stdout = sys.stdout
        self.file = open(filename , '''a''' )

    def __getattr__( self : str , attr : str ):
        """Delegate any other attribute lookup to the wrapped stdout."""
        return getattr(self.stdout , attr )

    def SCREAMING_SNAKE_CASE ( self : str , msg : str ):
        """Write ``msg`` to stdout, and to the file with lines that end in a
        carriage return (tqdm progress redraws) removed."""
        self.stdout.write(msg )
        # strip tqdm codes
        self.file.write(re.sub(R'''^.*\r''' , '''''' , msg , 0 , re.M ) )
def snake_case_ ( max_width : int = 80, full_python_path : bool = False ):
    '''Reconstruct the command line that launched this script as a
    shell-escaped string, wrapped to ``max_width`` columns with ``\\``
    line continuations.  Critical env vars (CUDA_VISIBLE_DEVICES) are
    prepended; ``full_python_path`` controls whether the interpreter is shown
    with its full path or just its basename.

    Bug fix: the original declared duplicate ``A_`` parameters (a
    SyntaxError) and lost the targets ``cmd``, ``lines`` and
    ``current_line``; all are restored.
    '''
    cmd = []
    # deal with critical env vars
    env_keys = ['''CUDA_VISIBLE_DEVICES''']
    for key in env_keys:
        val = os.environ.get(key, None )
        if val is not None:
            cmd.append(f'''{key}={val}''' )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''''''
    while len(cmd ) > 0:
        current_line += f'''{cmd.pop(0 )} '''
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ''''''
    return "\\\n".join(lines )
def snake_case_ ( args, output_dir ):
    '''Normalize ``args.base_cmd`` (unwrap multi-line input, force our own
    ``--output_dir`` and ``--overwrite_output_dir``) and return the resulting
    argv list prefixed with the current Python interpreter.

    Bug fix: the original declared duplicate ``A_`` parameters (a
    SyntaxError) while the body read ``args`` / ``output_dir``, and the
    ``args.base_cmd`` assignment targets were lost; both are restored.  The
    regex patterns are now raw strings (same patterns, no invalid-escape
    warnings).
    '''
    # unwrap multi-line input
    args.base_cmd = re.sub(R'''[\\\n]+''', ''' ''', args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(R'''--output_dir\s+[^\s]+''', '''''', args.base_cmd )
    args.base_cmd += f''' --output_dir {output_dir}'''
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(R'''--overwrite_output_dir\s+''', '''''', args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def snake_case_ ( id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose ):
    '''Run one benchmark variation as a subprocess, save its stdout/stderr
    under ``output_dir``, and return the metrics of interest read back from
    ``output_dir/all_results.json`` (or ``{target_metric_key: nan}`` on a
    non-zero exit code).

    Bug fix: the original declared duplicate ``A_`` parameters (a
    SyntaxError) and lost the targets ``result``, ``prefix`` and
    ``metrics``; all are restored.
    '''
    # Debug path (disabled): fake metrics without running anything.
    if 0:
        import random
        from time import sleep

        sleep(0 )
        return dict(
            {k: random.uniform(0, 1_00 ) for k in metric_keys}, **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )}, )
    result = subprocess.run(cmd, capture_output=True, text=True )
    if verbose:
        print('''STDOUT''', result.stdout )
        print('''STDERR''', result.stderr )
    # save the streams
    prefix = variation.replace(''' ''', '''-''' )
    with open(Path(output_dir ) / f'''log.{prefix}.stdout.txt''', '''w''' ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f'''log.{prefix}.stderr.txt''', '''w''' ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print('''failed''' )
        return {target_metric_key: nan}
    with io.open(f'''{output_dir}/all_results.json''', '''r''', encoding='''utf-8''' ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def snake_case_ ( id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose, ):
    '''Run one variation ``repeat_times`` times via ``process_run_single``,
    print a one-line outcome summary (✓/✘ per run plus the mean target
    metric) and return the averaged metrics dict keyed by ``variation_key``.

    Bug fix: the original declared duplicate ``A_`` parameters (a
    SyntaxError) and lost the targets ``metrics``, ``results``, ``preamble``,
    ``outcome`` and ``results_str``; all are restored.
    '''
    metrics = []
    results = []
    preamble = f'''{id}: {variation:<{longest_variation_len}}'''
    outcome = f'''{preamble}: '''
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ), desc=preamble, leave=False ):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K\r erases the tqdm leftovers on the current console line.
    outcome = f'''\33[2K\r{outcome}'''
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2 )
        results_str = f'''{outcome} {mean_target}'''
        if len(results ) > 1:
            results_str += f''' {tuple(round(x, 2 ) for x in results )}'''
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def snake_case_ ( ):
    '''Return a formatted report of the current software versions
    (transformers/torch/cuda/python) and the GPU hardware.

    Requires a CUDA device (queries ``torch.cuda``).

    Bug fix: the original assigned the device properties to a throwaway
    local, so the ``properties`` read inside the f-string was undefined; the
    target is restored.
    '''
    properties = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
    return f'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def snake_case_ ( results, target_metric_key, report_metric_keys, base_variation, output_dir ):
    '''Assemble the per-variation results into a DataFrame, add a ``diff_%``
    column relative to a baseline (``base_variation`` if given and valid,
    otherwise the minimal target value), and print the report twice: once in
    github-markdown format and once console-friendly.

    Bug fix: the original declared duplicate ``A_`` parameters (a
    SyntaxError) and lost the targets ``df``, ``variation_key``,
    ``diff_key``, ``sentinel_value``, ``df_github``, ``df_console`` and
    ``report``; all are restored.
    '''
    df = pd.DataFrame(results )
    variation_key = '''variation'''
    diff_key = '''diff_%'''
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r: round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0, axis='''columns''', )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis='''columns''' )  # reorder cols
    # capitalize
    df = df.rename(str.capitalize, axis='''columns''' )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('''_''', '''<br>''' ), axis='''columns''' )
    df_console = df.rename(lambda c: c.replace('''_''', '''\n''' ), axis='''columns''' )
    report = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt='''.2f''' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt='''.2f''' )]
    print('''\n\n'''.join(report ) )
def snake_case_ ( ):
    '''Entry point of the trainer-benchmark tool: parse CLI args, expand the
    variation dimensions into their cartesian product, run each variation
    through ``process_run`` (logging all output via the Tee) and finally
    print the comparison report via ``process_results``.

    Bug fix: the original lost every assignment target (``parser``, ``args``,
    ``output_dir``, ``base_cmd``, ``dims``, ``variations`` …); they are
    restored.  NOTE(review): the calls to get_base_command / Tee /
    process_run / process_results keep their original names; in this
    obfuscated module those callees were renamed — confirm bindings.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base-cmd''', default=None, type=str, required=True, help='''Base cmd''', )
    parser.add_argument(
        '''--variations''', default=None, type=str, nargs='''+''', required=True, help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''', )
    parser.add_argument(
        '''--base-variation''', default=None, type=str, help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''', )
    parser.add_argument(
        '''--target-metric-key''', default=None, type=str, required=True, help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''', )
    parser.add_argument(
        '''--report-metric-keys''', default='''''', type=str, help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''', )
    parser.add_argument(
        '''--repeat-times''', default=1, type=int, help='''How many times to re-run each variation - an average will be reported''', )
    parser.add_argument(
        '''--output_dir''', default='''output_benchmark''', type=str, help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''', )
    parser.add_argument(
        '''--verbose''', default=False, action='''store_true''', help='''Whether to show the outputs of each run or just the benchmark progress''', )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args, output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(R'''\|''', x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(''' '''.join, itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
    print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
    print(f'''and this script\'s output is also piped into {report_fn}''' )
    sys.stdout = Tee(report_fn )
    print(f'''\n*** Running {len(variations )} benchmarks:''' )
    print(f'''Base command: {" ".join(base_cmd )}''' )
    variation_key = '''variation'''
    results = []
    for id, variation in enumerate(tqdm(variations, desc='''Total completion: ''', leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose, ) )
    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir )
# NOTE(review): ``main`` is undefined in this obfuscated module — the entry
# point above was renamed to ``snake_case_``; confirm the intended binding.
if __name__ == "__main__":
    main()
| 83 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase):
    # Slow integration test: compares the Flax MT5-small loss on a tiny example
    # against a fixed reference score.
    @slow
    def test_small_integration_test( self ):
        """Check FlaxMT5-small's sequence score on a toy input against the expected value."""
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        # Teacher forcing: decoder inputs are the labels shifted right by one.
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        # Sequence score = negative total (per-token mean * length) cross entropy.
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 83 | 1 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def snake_case_ ( pkg : str, hint : str = None ):
    '''Check that the installed version of ``pkg`` satisfies the pinned requirement.

    Raises if the requirement is not met; ``hint`` is appended to the error message.
    '''
    require_version(deps[pkg], hint )
| 83 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    '''files''', [
        ['''full:README.md''', '''dataset_infos.json'''],
        ['''empty:README.md''', '''dataset_infos.json'''],
        ['''dataset_infos.json'''],
        ['''full:README.md'''],
    ], )
def test_from_dir ( files, tmp_path_factory ):
    '''DatasetInfosDict.from_directory reads the size from README.md and/or dataset_infos.json.'''
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
            f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
            f.write('''''' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''', '''w''' ) as f:
            f.write('''{"default": {"dataset_size": 42}}''' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    '''dataset_info''', [
        DatasetInfo(),
        DatasetInfo(
            description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ),
    ], )
def test_dataset_info_dump_and_reload ( tmp_path, dataset_info ):
    '''A DatasetInfo survives a write_to_directory / from_directory round trip.'''
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, '''dataset_info.json''' ) )
def test_dataset_info_to_yaml_dict ( ):
    '''_to_yaml_dict keeps exactly the whitelisted keys and YAML-serialisable values.'''
    dataset_info = DatasetInfo(
        description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
    # Round-trip through YAML text must be lossless.
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty ( ):
    '''A default-constructed DatasetInfo serialises to an empty YAML dict.'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    '''dataset_infos_dict''', [
        DatasetInfosDict(),
        DatasetInfosDict({'''default''': DatasetInfo()} ),
        DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
        DatasetInfosDict(
            {
                '''default''': DatasetInfo(
                    description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, )
            } ),
        DatasetInfosDict(
            {
                '''v1''': DatasetInfo(dataset_size=42 ),
                '''v2''': DatasetInfo(dataset_size=13_37 ),
            } ),
    ], )
def test_dataset_infos_dict_dump_and_reload ( tmp_path, dataset_infos_dict ):
    '''DatasetInfosDict round-trips through README.md, modulo fields YAML does not keep.'''
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, '''README.md''' ) )
| 83 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase__ = '''docs/source/en/_toctree.yml'''
def clean_model_doc_toc ( model_doc ):
    '''
    Deduplicate the entries of a model-doc table of content (keyed by ``"local"``)
    and return the cleaned entries sorted by title, case-insensitively.

    Raises ValueError when the same ``local`` key appears with conflicting titles.
    '''
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc, key=lambda s : s["title"].lower() )
def check_model_doc ( overwrite=False ):
    '''
    Check that the model section of the doc table of content is clean and sorted.

    With ``overwrite=True`` the fixed TOC is written back to disk; otherwise a
    ValueError is raised when a difference is found.
    '''
    # Location of the documentation table of content.
    toc_path = '''docs/source/en/_toctree.yml'''
    with open(toc_path, encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']
    # Clean each per-modality subsection independently.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['''sections'''] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(toc_path, '''w''', encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    # CLI: check the TOC, optionally rewriting it in place.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 83 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    """Builds a tiny ASTConfig plus matching random inputs for fast AST unit tests."""

    def __init__( self , parent , batch_size=1_3 , patch_size=2 , max_length=2_4 , num_mel_bins=1_6 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs( self ):
        """Return (config, input_values, labels) with random spectrogram-shaped data."""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels

    def get_config( self ):
        """Build an ASTConfig from the tester's hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )

    def create_and_check_model( self , config , input_values , labels ):
        """Forward a bare ASTModel and verify the output hidden-state shape."""
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def prepare_config_and_inputs_for_common( self ):
        """Return (config, inputs_dict) as expected by the common model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class __snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """Common model tests for AST; AST takes audio input_values, not input_ids/inputs_embeds."""

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """Skip audio-classification pipeline tests for this model suite."""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp( self ):
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=3_7 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass

    def test_model_common_attributes( self ):
        """Input embeddings exist; output embeddings are None or a Linear head."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        """The first forward argument must be ``input_values``."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio ( ):
    '''Download a sample audio clip from the Hub and return (waveform, sampling_rate).'''
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase):
    """Slow integration tests running a real AST checkpoint on a real audio clip."""

    @cached_property
    def default_feature_extractor( self ):
        """Feature extractor matching the checkpoint, or None if torchaudio is unavailable."""
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification( self ):
        """Run a forward pass and compare the first logits to reference values."""
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(torch_device )
        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 5_2_7) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 83 | 1 |
"""simple docstring"""
def check_bouncy ( num: int ) -> bool:
    '''
    Return True if ``num`` is bouncy: its digits are neither entirely
    non-decreasing nor entirely non-increasing.

    Raises ValueError for non-integer input.
    '''
    if not isinstance(num, int ):
        raise ValueError('''check_bouncy() accepts only integer arguments''' )
    str_n = str(num )
    sorted_str_n = ''''''.join(sorted(str_n ) )
    # Not sorted ascending and not sorted descending => bouncy.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution ( percent: float = 99 ) -> int:
    '''
    Return the least number for which the proportion of bouncy numbers first
    reaches exactly ``percent`` (Project Euler problem 112).
    '''
    if not 0 < percent < 1_00:
        raise ValueError('''solution() only accepts values from 0 to 100''' )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 1_00 >= percent:
            return num
        num += 1
if __name__ == "__main__":
    # Run the module's doctests, then print the answer for the default 99% threshold.
    from doctest import testmod
    testmod()
    print(F"""{solution(99)}""")
| 83 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing (
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 1_00,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    '''
    Simulated-annealing search starting from ``search_prob`` (a SearchProblem-like
    state exposing ``score()``, ``get_neighbors()`` and ``x``/``y`` coordinates).

    Maximises the score when ``find_max`` is True, otherwise minimises it.
    Neighbors outside [min_x, max_x] x [min_y, max_y] are rejected. The
    temperature decays geometrically by ``rate_of_decrease`` each iteration and
    the search stops below ``threshold_temp`` or when no neighbor is accepted.
    Returns the best state seen.
    '''
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ), scores )
        plt.xlabel('''Iterations''' )
        plt.ylabel('''Function values''' )
        plt.show()
    return best_state
if __name__ == "__main__":
    # Demo runs of simulated annealing on two objective functions.

    def test_fa ( x, y ):
        '''Objective f(x, y) = x^2 + y^2 (minimal at the origin).'''
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    def test_fa ( x, y ):
        '''Objective f(x, y) = 3x^2 - 6y.'''
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F"""{local_min.score()}"""
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F"""{local_min.score()}"""
    )
| 83 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
    format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
    datefmt='''%Y-%m-%d %H:%M:%S''',
    level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
    stream=sys.stdout,
)
logger = logging.getLogger(__name__)
# Registries mapping a checkpoint name to its model / tokenizer class.
model_dict = {'''facebook/bart-base''': BartForConditionalGeneration}
tokenizer_dict = {'''facebook/bart-base''': BartTokenizer}
def parse_args ( ):
    '''Parse the command-line arguments for the BART + beam search → ONNX export script.'''
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
    parser.add_argument(
        '''--validation_file''', type=str, default=None, help='''A csv or a json file containing the validation data.''' )
    parser.add_argument(
        '''--max_length''', type=int, default=5, help='''The maximum total input sequence length after tokenization.''', )
    parser.add_argument(
        '''--num_beams''', type=int, default=None, help=(
            '''Number of beams to use for evaluation. This argument will be '''
            '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
        ), )
    parser.add_argument(
        '''--model_name_or_path''', type=str, help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=True, )
    parser.add_argument(
        '''--config_name''', type=str, default=None, help='''Pretrained config name or path if not the same as model_name''', )
    parser.add_argument(
        '''--device''', type=str, default='''cpu''', help='''Device where the model will be run''', )
    parser.add_argument('''--output_file_path''', type=str, default=None, help='''Where to store the final ONNX file.''' )
    args = parser.parse_args()
    return args
def load_model_tokenizer ( model_name, device="cpu" ):
    '''Load the HF model and tokenizer for ``model_name`` and apply export-friendly config tweaks.'''
    huggingface_model = model_dict[model_name].from_pretrained(model_name ).to(device )
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name )
    if model_name in ["facebook/bart-base"]:
        # Disable generation features the scripted beam search does not support.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model ( model, tokenizer, onnx_file_path, num_beams, max_length ):
    '''
    Script the model with beam search, export it to ONNX, and verify that the
    ONNX Runtime output matches the PyTorch ``generate`` output.
    '''
    model.eval()
    ort_sess = None
    # TorchScript wrapper that embeds the beam-search loop into the graph.
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = '''My friends are cool but they eat too many carbs.'''
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=10_24, return_tensors='''pt''' ).to(model.device )
        # Reference output from the eager PyTorch model.
        summary_ids = model.generate(
            inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )
        torch.onnx.export(
            bart_script_model, (
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''], output_names=['''output_ids'''], dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            }, example_outputs=summary_ids, )
        logger.info('''Model exported to {}'''.format(onnx_file_path ) )
        # Shrink the exported graph by deduplicating initializers.
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None, {
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(num_beams ),
                '''max_length''': np.array(max_length ),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
            }, )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1E-3, atol=1E-3 )
        logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
        logger.info('''Success.''' )
def main ( ):
    '''Entry point: parse args, load the model, then export and validate the ONNX graph.'''
    args = parse_args()
    # Fallback values used when the corresponding CLI options are absent.
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device )
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_path = args.output_file_path
    else:
        output_path = '''BART.onnx'''
    logger.info('''Exporting model to ONNX''' )
    export_and_validate_model(model, tokenizer, output_path, num_beams, max_length )
# Script entry point.
if __name__ == "__main__":
    main()
| 83 |
"""simple docstring"""
from collections import namedtuple

# Each entry converts through cubic metres: multiply by ``from_`` to reach cubic
# metres, then by ``to`` to reach the target unit.
from_to = namedtuple('''from_to''', '''from_ to''')

METRIC_CONVERSION = {
    '''cubicmeter''': from_to(1, 1),
    '''litre''': from_to(0.0_0_1, 1000),
    '''kilolitre''': from_to(1, 1),
    '''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
    '''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
    '''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7),
    '''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}


def snake_case_ ( value: float, from_type: str, to_type: str ) -> float:
    '''Convert a volume ``value`` from ``from_type`` units to ``to_type`` units.

    Raises ValueError when either unit name is not in METRIC_CONVERSION.
    '''
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 83 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class __snake_case ( _lowercase):
snake_case__ : Tuple = ["input_features", "is_longer"]
def __init__( self : Optional[int] , __lowerCAmelCase : Tuple=6_4 , __lowerCAmelCase : Union[str, Any]=4_8_0_0_0 , __lowerCAmelCase : int=4_8_0 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : int=1_0_2_4 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : int=False , __lowerCAmelCase : float = 0 , __lowerCAmelCase : float = 1_4_0_0_0 , __lowerCAmelCase : int = None , __lowerCAmelCase : str = "fusion" , __lowerCAmelCase : str = "repeatpad" , **__lowerCAmelCase : List[str] , ):
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : str = top_db
_lowerCamelCase : Optional[int] = truncation
_lowerCamelCase : Tuple = padding
_lowerCamelCase : Any = fft_window_size
_lowerCamelCase : Tuple = (fft_window_size >> 1) + 1
_lowerCamelCase : int = hop_length
_lowerCamelCase : List[Any] = max_length_s
_lowerCamelCase : Optional[Any] = max_length_s * sampling_rate
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Tuple = frequency_min
_lowerCamelCase : List[Any] = frequency_max
_lowerCamelCase : str = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__lowerCAmelCase , min_frequency=__lowerCAmelCase , max_frequency=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , norm=__lowerCAmelCase , mel_scale='''htk''' , )
_lowerCamelCase : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__lowerCAmelCase , min_frequency=__lowerCAmelCase , max_frequency=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , norm='''slaney''' , mel_scale='''slaney''' , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCamelCase : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : np.array , __lowerCAmelCase : Optional[np.array] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = spectrogram(
__lowerCAmelCase , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__lowerCAmelCase , log_mel='''dB''' , )
return log_mel_spectrogram.T
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowerCamelCase : Dict = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowerCamelCase : Dict = [0]
# randomly choose index for each part
_lowerCamelCase : Dict = np.random.choice(ranges[0] )
_lowerCamelCase : List[str] = np.random.choice(ranges[1] )
_lowerCamelCase : int = np.random.choice(ranges[2] )
_lowerCamelCase : Tuple = mel[idx_front : idx_front + chunk_frames, :]
_lowerCamelCase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowerCamelCase : Dict = mel[idx_back : idx_back + chunk_frames, :]
_lowerCamelCase : str = torch.tensor(mel[None, None, :] )
_lowerCamelCase : List[Any] = torch.nn.functional.interpolate(
__lowerCAmelCase , size=[chunk_frames, 6_4] , mode='''bilinear''' , align_corners=__lowerCAmelCase )
_lowerCamelCase : Dict = mel_shrink[0][0].numpy()
_lowerCamelCase : Optional[Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : np.array , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowerCamelCase : Any = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowerCamelCase : int = len(__lowerCAmelCase ) - max_length
_lowerCamelCase : str = np.random.randint(0 , overflow + 1 )
_lowerCamelCase : List[str] = waveform[idx : idx + max_length]
_lowerCamelCase : List[Any] = self._np_extract_fbank_features(__lowerCAmelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowerCamelCase : Any = self._np_extract_fbank_features(__lowerCAmelCase , self.mel_filters )
_lowerCamelCase : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowerCamelCase : int = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowerCamelCase : str = np.stack([mel, mel, mel, mel] , axis=0 )
_lowerCamelCase : Any = False
else:
_lowerCamelCase : Any = self._random_mel_fusion(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[int] = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
_lowerCamelCase : Optional[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowerCamelCase : Tuple = int(max_length / len(__lowerCAmelCase ) )
_lowerCamelCase : Dict = np.stack(np.tile(__lowerCAmelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowerCamelCase : Optional[Any] = int(max_length / len(__lowerCAmelCase ) )
_lowerCamelCase : int = np.stack(np.tile(__lowerCAmelCase , __lowerCAmelCase ) )
_lowerCamelCase : Tuple = np.pad(__lowerCAmelCase , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
_lowerCamelCase : Tuple = self._np_extract_fbank_features(__lowerCAmelCase , self.mel_filters )
_lowerCamelCase : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowerCamelCase : List[Any] = self._np_extract_fbank_features(__lowerCAmelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Optional[Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : str = None , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : List[str] = truncation if truncation is not None else self.truncation
_lowerCamelCase : Optional[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
_lowerCamelCase : List[str] = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
_lowerCamelCase : str = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : str = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
_lowerCamelCase : int = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Dict = [np.asarray(__lowerCAmelCase )]
# convert to mel spectrogram, truncate and pad if needed.
_lowerCamelCase : Dict = [
self._get_input_mel(__lowerCAmelCase , max_length if max_length else self.nb_max_samples , __lowerCAmelCase , __lowerCAmelCase )
for waveform in raw_speech
]
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = []
for mel, longer in padded_inputs:
input_mel.append(__lowerCAmelCase )
is_longer.append(__lowerCAmelCase )
if truncation == "fusion" and sum(__lowerCAmelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowerCamelCase : List[Any] = np.random.randint(0 , len(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = True
if isinstance(input_mel[0] , __lowerCAmelCase ):
_lowerCamelCase : Optional[int] = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowerCamelCase : Optional[Any] = [[longer] for longer in is_longer]
_lowerCamelCase : Tuple = {'''input_features''': input_mel, '''is_longer''': is_longer}
_lowerCamelCase : Union[str, Any] = BatchFeature(__lowerCAmelCase )
if return_tensors is not None:
_lowerCamelCase : List[str] = input_features.convert_to_tensors(__lowerCAmelCase )
return input_features
| 83 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    """Builds tiny DeBERTa-v2 configs/inputs and checks every task head.

    NOTE(review): the obfuscated source named this class `__snake_case`
    (colliding with the other test classes in this file, so later definitions
    shadowed it) with an undefined base `_lowercase`, and gave every method the
    same name. Names are restored to match the call sites in the test class
    below (`DebertaVaModelTester(self)`, `create_and_check_deberta_model`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config for one test run."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a tiny DebertaVaConfig for fast tests."""
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        """The loss must be a scalar tensor."""
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward the base model with progressively fewer optional inputs."""
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each input to (batch, num_choices, seq) for the multiple-choice head.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() into the common (config, inputs_dict) shape."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the tiny DeBERTa-v2 variants.

    NOTE(review): the obfuscated source bound every class attribute to the
    same name `snake_case__` (so only the last survived) and named every test
    method identically (so unittest discovered nothing); the mixin attribute
    names and `test_*` names are restored.
    """

    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the published deberta-v2-xlarge checkpoint."""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 83 | 1 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
# Module logger; referenced as `logger` by get_coref_infos() and evaluate() below.
logger = datasets.logging.get_logger(__name__)
# BibTeX citations for the metrics implemented by CoVal; consumed by _info().
_CITATION = '''\
@InProceedings{moosavi2019minimum,
    author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
    title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
    year = {2019},
    booktitle = {Proceedings of the 57th Annual Meeting of
        the Association for Computational Linguistics (Volume 1: Long Papers)},
    publisher = {Association for Computational Linguistics},
    address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
    author = {Amit Bagga and Breck Baldwin},
    title = {Algorithms for Scoring Coreference Chains},
    booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
    year = {1998},
    pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
    author = {Xiaoqiang Luo},
    title = {On coreference resolution performance metrics},
    booktitle = {In Proc. of HLT/EMNLP},
    year = {2005},
    pages = {25--32},
    publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
    title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
    author = "Moosavi, Nafise Sadat and
      Strube, Michael",
    booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = aug,
    year = "2016",
    address = "Berlin, Germany",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P16-1060",
    doi = "10.18653/v1/P16-1060",
    pages = "632--642",
}
'''

# Human-readable description of the metric; consumed by _info() and the
# add_start_docstrings decorator on the Coval class.
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column  Type    Description
1   Document ID This is a variation on the document filename
2   Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3   Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5   Part-of-Speech
6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7   Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8   Predicate Frameset ID   This is the PropBank frameset ID of the predicate in Column 7.
9   Word sense  This is the word sense of the word in Column 3.
10  Speaker/Author  This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities  These columns identifies the spans representing various named entities.
12:N    Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N   Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''

# Argument/return documentation shown to users of the metric; consumed by
# _info() and the add_start_docstrings decorator on the Coval class.
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
        Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions of key or system files,
        mentions whose corresponding coreference chain is of size one,
        are considered as singletons. The default evaluation mode will include
        singletons in evaluations if they are included in the key or the system files.
        By setting \'keep_singletons=False\', all singletons in the key and system files
        will be excluded from the evaluation.
    NP_only: Most of the recent coreference resolvers only resolve NP mentions and
        leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
    min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
        Minimum spans are determined using the MINA algorithm.
Returns:
    \'mentions\': mentions
    \'muc\': MUC metric [Vilain et al, 1995]
    \'bcub\': B-cubed [Bagga and Baldwin, 1998]
    \'ceafe\': CEAFe [Luo et al., 2005]
    \'lea\': LEA [Moosavi and Strube, 2016]
    \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
    >>> coval = datasets.load_metric(\'coval\')
    >>> words = [\'bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -\',
    ... \'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)\',
    ... \'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *   (116)\',
    ... \'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *         (ARG2*         *       -\',
    ... \'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)       -\',
    ... \'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *       -\']
    >>> references = [words]
    >>> predictions = [words]
    >>> results = coval.compute(predictions=predictions, references=references)
    >>> print(results) # doctest:+ELLIPSIS
    {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Parse key (gold) and system CoNLL lines into per-document coref structures.

    Returns a dict mapping `doc` to a tuple
    (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    as expected by `coval.eval.evaluator.evaluate_documents`.

    The obfuscated source repeated one parameter name (a SyntaxError) while the
    body referenced the originals; names are restored from those references.
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        # NOTE(review): the source passes key_doc_lines (not sys_doc_lines) here,
        # mirroring upstream coval — preserved as-is; confirm against upstream.
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}'
        )
        logger.info(
            'Number of resulting singleton clusters in the key '
            f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}'
        )

    if not keep_singletons:
        logger.info(
            f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            'files, respectively'
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score system output against the key with every (name, metric) in `metrics`.

    Returns a dict of `{name}/recall`, `{name}/precision`, `{name}/f1` entries,
    plus `conll_score` (average F1 of MUC, B-cubed and CEAFe scaled to 100)
    when all three of those metrics were evaluated.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': f1})

        # NOTE(review): logger.info is called with multiple positional strings,
        # mirroring upstream coval; preserved as-is.
        logger.info(
            name.ljust(10),
            f'Recall: {recall * 1_00:.2f}',
            f' Precision: {precision * 1_00:.2f}',
            f' F1: {f1 * 1_00:.2f}',
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 1_00
        logger.info(f'CoNLL score: {conll:.2f}')
        output_scores.update({'conll_score': conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True if the key lines carry gold parse annotation.

    Inspects the first non-comment line with more than 6 columns: gold parse
    is present when its parse-bit column (index 5) is not "-".
    Empty input or comment-only input yields False.
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#'):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == '-':
                    has_gold_parse = True
                    break
                else:
                    break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    """CoVal coreference metric (mentions, MUC, B-cubed, CEAFe, LEA, CoNLL score).

    The `datasets.Metric` API requires the hooks to be named `_info` and
    `_compute`; the obfuscated source gave both the same meaningless name,
    breaking the API (names restored).
    """

    def _info(self):
        """Declare metric metadata and the expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string')),
                    'references': datasets.Sequence(datasets.Value('string')),
                }
            ),
            codebase_urls=['https://github.com/ns-moosavi/coval'],
            reference_urls=[
                'https://github.com/ns-moosavi/coval',
                'https://www.aclweb.org/anthology/P16-1060',
                'http://www.conll.cemantix.org/2012/data.html',
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        """Evaluate predictions against references with all CoVal sub-metrics."""
        all_metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=all_metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 83 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# NOTE(review): the same name is rebound three times below — mechanical renaming
# damage; the three constants were presumably distinct (repo id, cache directory,
# pinned commit sha) and are what the test class below should reference.
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert'''
# Cache directory of the tiny fixture repo inside the transformers cache.
lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
# Full commit hash of the pinned revision of that repo.
lowerCAmelCase__ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class __snake_case ( unittest.TestCase):
    # NOTE(review): every method below is named SCREAMING_SNAKE_CASE, so only the
    # last definition survives on the class, and the bodies read the bare name
    # `__lowerCAmelCase`, which is never defined at module level. This looks like
    # mechanical renaming damage — the arguments were presumably the module
    # constants above (repo id, CONFIG_NAME/WEIGHTS_NAME, cache dir, commit sha).
    # Confirm against the original hub-utils test file before relying on it.

    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Download a file and check the hub cache layout, cache hits and revisions."""
        _lowerCamelCase : Optional[Any] = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(__lowerCAmelCase ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : Optional[int] = f.read()
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
        self.assertTrue(os.path.isfile(__lowerCAmelCase ) )
        # File is cached at the same place the second time.
        _lowerCamelCase : Tuple = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        # Using a specific revision to test the full commit hash.
        _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''9b8c223''' )
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Invalid repo ids, revisions and filenames must raise with clear messages."""
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            _lowerCamelCase : Optional[int] = cached_file('''tiny-random-bert''' , __lowerCAmelCase )
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            _lowerCamelCase : str = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''aaaa''' )
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : int = cached_file(__lowerCAmelCase , '''conf''' )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Missing entries are cached (.no_exist) and connection errors degrade to None."""
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , '''conf''' )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : List[Any] = f.read()
        self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''.no_exist''' , __lowerCAmelCase , '''conf''' ) ) )
        _lowerCamelCase : str = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = cached_file(__lowerCAmelCase , '''conf''' , local_files_only=__lowerCAmelCase , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        _lowerCamelCase : Any = mock.Mock()
        _lowerCamelCase : Optional[Any] = 5_0_0
        _lowerCamelCase : Dict = {}
        _lowerCamelCase : List[Any] = HTTPError
        _lowerCamelCase : int = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=__lowerCAmelCase ) as mock_head:
            _lowerCamelCase : Union[str, Any] = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=__lowerCAmelCase )
            self.assertIsNone(__lowerCAmelCase )
        # This check we did call the fake head request
        mock_head.assert_called()

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """has_file answers per-framework weight presence for a PT-only repo."""
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """get_file_from_repo returns None for missing files, validates repo/revision."""
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , __lowerCAmelCase )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase , revision='''ahaha''' )
        _lowerCamelCase : Dict = get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        _lowerCamelCase : Dict = json.loads(open(__lowerCAmelCase , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 7_6_8 )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """A plain local directory also works as a 'repo' for get_file_from_repo."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCamelCase : Any = Path(__lowerCAmelCase ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(__lowerCAmelCase , '''a.txt''' ) , str(__lowerCAmelCase ) )
            self.assertIsNone(get_file_from_repo(__lowerCAmelCase , '''b.txt''' ) )
| 83 | 1 |
"""simple docstring"""
from collections import defaultdict
def snake_case_(first_str, second_str):
    """Return True if the two strings are anagrams of each other.

    Comparison is case-insensitive and ignores all spaces.

    Fixes over the original: the two parameters were both named ``A_``
    (a SyntaxError), every assignment targeted a throwaway local so
    ``first_str``/``second_str``/``count`` were never bound, and the
    counter was built as ``defaultdict(A_)`` instead of ``defaultdict(int)``.
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # Tally each character of the first string against the second; all net
    # counts must come out to zero for the strings to be anagrams.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Read the two candidate strings from stdin.
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()
    # BUG FIX: this module defines `snake_case_`, not `check_anagrams`; the
    # original call raised NameError, and the results of the three statements
    # above were all bound to the same throwaway name.
    status = snake_case_(input_a, input_b)
    # Use single quotes inside the f-string: reusing the outer quote character
    # is a SyntaxError before Python 3.12.
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 83 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (presumably `logger` originally; the same placeholder
# name is rebound by the archive map just below — renaming damage, confirm).
lowerCAmelCase__ = logging.get_logger(__name__)

# Map from checkpoint name to the URL of its config.json on the Hub.
lowerCAmelCase__ = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __snake_case(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Every list-valued argument holds one entry per stage (three stages by
    default); the defaults reproduce the microsoft/cvt-13 architecture.

    Fixes over the original: the base class was the undefined name
    ``_lowercase`` (the file imports ``PretrainedConfig`` for exactly this),
    every ``__init__`` parameter was named ``__lowerCAmelCase`` (duplicate
    argument names are a SyntaxError), and the attribute assignments targeted
    a throwaway local instead of ``self``. Parameter names and order are
    recovered from the names the original body read and the visible defaults.
    """

    snake_case__ : List[str] = "cvt"

    def __init__(
        self,
        num_channels=3,                                     # input image channels
        patch_sizes=[7, 3, 3],                              # conv-embedding kernel per stage
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],                           # hidden size per stage
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],                                   # transformer blocks per stage
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],                     # only the last stage adds a CLS token
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],  # depthwise conv + batch norm
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 83 | 1 |
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __snake_case ( _lowercase):
    # NOTE(review): every method is named SCREAMING_SNAKE_CASE, so only the last
    # definition survives at runtime, the base class `_lowercase` is undefined
    # (presumably TestCase), and several results are bound to a throwaway local
    # instead of the attributes read later (e.g. `self.num_block_records`,
    # `self.vocab_file`). Mechanical renaming damage — confirm against the
    # original REALM retriever test file before relying on this class.

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """setUp: build a temp dir with a tiny REALM vocab and a block-records folder."""
        _lowerCamelCase : Tuple = tempfile.mkdtemp()
        _lowerCamelCase : List[Any] = 5
        # Realm tok
        _lowerCamelCase : Union[str, Any] = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        _lowerCamelCase : Tuple = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        _lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Load the tokenizer fixture written by setUp."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """tearDown: remove the temp fixture directory."""
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Return a REALM config sized to the dummy block records."""
        _lowerCamelCase : Dict = RealmConfig(num_block_records=self.num_block_records )
        return config

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Return a tiny two-row QA dataset fixture."""
        _lowerCamelCase : Optional[Any] = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Return dummy evidence blocks as a numpy array of byte strings."""
        _lowerCamelCase : Any = np.array(
            [
                B'''This is the first record''',
                B'''This is the second record''',
                B'''This is the third record''',
                B'''This is the fourth record''',
                B'''This is the fifth record''',
                B'''This is a longer longer longer record''',
            ] , dtype=__lowerCAmelCase , )
        return block_records

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Assemble a RealmRetriever over the dummy block records and tokenizer."""
        _lowerCamelCase : str = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Retrieve with answer_ids and check shapes plus the concatenated token text."""
        _lowerCamelCase : Dict = self.get_config()
        _lowerCamelCase : Dict = self.get_dummy_retriever()
        _lowerCamelCase : List[Any] = retriever.tokenizer
        _lowerCamelCase : str = np.array([0, 3] , dtype='''long''' )
        _lowerCamelCase : Optional[int] = tokenizer(['''Test question'''] ).input_ids
        _lowerCamelCase : str = tokenizer(
            ['''the fourth'''] , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ).input_ids
        _lowerCamelCase : Dict = config.reader_seq_len
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = retriever(
            __lowerCAmelCase , __lowerCAmelCase , answer_ids=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors='''np''' )
        self.assertEqual(len(__lowerCAmelCase ) , 2 )
        self.assertEqual(len(__lowerCAmelCase ) , 2 )
        self.assertEqual(len(__lowerCAmelCase ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Check answer-span localization (has_answers / start_pos / end_pos)."""
        _lowerCamelCase : int = self.get_config()
        _lowerCamelCase : Dict = self.get_dummy_retriever()
        _lowerCamelCase : List[Any] = retriever.tokenizer
        _lowerCamelCase : Optional[int] = np.array([0, 3, 5] , dtype='''long''' )
        _lowerCamelCase : Dict = tokenizer(['''Test question'''] ).input_ids
        _lowerCamelCase : List[Any] = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ).input_ids
        _lowerCamelCase : Optional[Any] = config.reader_seq_len
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = retriever(
            __lowerCAmelCase , __lowerCAmelCase , answer_ids=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors='''np''' )
        self.assertEqual([False, True, True] , __lowerCAmelCase )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __lowerCAmelCase )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Round-trip save_pretrained/from_pretrained, locally and via a mocked hub."""
        _lowerCamelCase : int = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        # Test local path
        _lowerCamelCase : str = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            _lowerCamelCase : List[Any] = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            _lowerCamelCase : List[Any] = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
            self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
| 83 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __snake_case ( _lowercase):
    # NOTE(review): both fields share the name `snake_case__`, so the dataclass
    # ends up with a single field — renaming damage. The scheduler below builds
    # this as SdeVeOutput(prev_sample=..., prev_sample_mean=...), so the fields
    # were presumably `prev_sample` and `prev_sample_mean`; confirm and restore.
    snake_case__ : torch.FloatTensor
    snake_case__ : torch.FloatTensor
class __snake_case ( _lowercase , _lowercase):
    # Variance-exploding (VE) SDE scheduler (score_sde_pytorch style):
    # predictor (`step_pred`) + corrector (`step_correct`) sampling.
    # NOTE(review): mechanical renaming damage throughout — every method is named
    # SCREAMING_SNAKE_CASE (later defs shadow earlier ones), `__init__` parameters
    # are all `__lowerCAmelCase` (duplicate argument names are a SyntaxError) while
    # bodies read the real names (`sigma_max`, `sampling_eps`, ...), and results
    # are bound to a throwaway local instead of attributes like `self.timesteps` /
    # `self.discrete_sigmas` that other methods read. Confirm against the original
    # ScoreSdeVeScheduler before relying on this class.

    snake_case__ : int = 1  # presumably the scheduler `order`

    @register_to_config
    def __init__( self : str , __lowerCAmelCase : int = 2_0_0_0 , __lowerCAmelCase : float = 0.15 , __lowerCAmelCase : float = 0.01 , __lowerCAmelCase : float = 13_48.0 , __lowerCAmelCase : float = 1E-5 , __lowerCAmelCase : int = 1 , ):
        """Initialize sigmas; defaults look like (num_train_timesteps, snr,
        sigma_min, sigma_max, sampling_eps, correct_steps) — TODO confirm."""
        _lowerCamelCase : Optional[int] = sigma_max
        # setable values
        _lowerCamelCase : Dict = None
        self.set_sigmas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ):
        """scale_model_input: VE-SDE needs no input scaling — identity."""
        return sample

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : Union[str, torch.device] = None ):
        """set_timesteps: continuous timesteps linearly spaced from 1 down to sampling_eps."""
        _lowerCamelCase : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        _lowerCamelCase : Optional[int] = torch.linspace(1 , __lowerCAmelCase , __lowerCAmelCase , device=__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None ):
        """set_sigmas: geometric noise schedule between sigma_min and sigma_max."""
        _lowerCamelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
        _lowerCamelCase : int = sigma_max if sigma_max is not None else self.config.sigma_max
        _lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        _lowerCamelCase : Optional[int] = torch.exp(torch.linspace(math.log(__lowerCAmelCase ) , math.log(__lowerCAmelCase ) , __lowerCAmelCase ) )
        _lowerCamelCase : Tuple = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ):
        """get_adjacent_sigma: sigma of the previous discrete step (zero at t == 0)."""
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
        """step_pred: one reverse-SDE predictor step (ancestral sampling analog)."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        _lowerCamelCase : Tuple = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        _lowerCamelCase : Dict = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        _lowerCamelCase : Optional[int] = timesteps.to(self.discrete_sigmas.device )
        _lowerCamelCase : Any = self.discrete_sigmas[timesteps].to(sample.device )
        _lowerCamelCase : int = self.get_adjacent_sigma(__lowerCAmelCase , __lowerCAmelCase ).to(sample.device )
        _lowerCamelCase : Any = torch.zeros_like(__lowerCAmelCase )
        _lowerCamelCase : Any = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        _lowerCamelCase : Union[str, Any] = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            _lowerCamelCase : List[Any] = diffusion.unsqueeze(-1 )
        _lowerCamelCase : int = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        _lowerCamelCase : List[str] = randn_tensor(
            sample.shape , layout=sample.layout , generator=__lowerCAmelCase , device=sample.device , dtype=sample.dtype )
        _lowerCamelCase : List[Any] = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        _lowerCamelCase : int = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=__lowerCAmelCase , prev_sample_mean=__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
        """step_correct: Langevin corrector step with SNR-derived step size."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        _lowerCamelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=__lowerCAmelCase ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        _lowerCamelCase : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        _lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        _lowerCamelCase : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        _lowerCamelCase : Tuple = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        _lowerCamelCase : Union[str, Any] = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            _lowerCamelCase : str = step_size.unsqueeze(-1 )
        _lowerCamelCase : Any = sample + step_size * model_output
        _lowerCamelCase : int = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , ):
        """add_noise: perturb clean samples with per-timestep sigma-scaled noise."""
        _lowerCamelCase : Dict = timesteps.to(original_samples.device )
        _lowerCamelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
        _lowerCamelCase : Union[str, Any] = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        _lowerCamelCase : int = noise + original_samples
        return noisy_samples

    def __len__( self : Optional[int] ):
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
| 83 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __snake_case :
    def __init__( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Tuple=7 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Union[str, Any]=9_9 , __lowerCAmelCase : Union[str, Any]=3_6 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : int=3_7 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : str=5_1_2 , __lowerCAmelCase : Optional[Any]=1_6 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Any=6 , __lowerCAmelCase : List[str]=6 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Any=None , __lowerCAmelCase : Optional[Any]=1_0_0_0 , ):
        """Store the tester hyper-parameters used to build tiny LayoutLMv3 models.

        NOTE(review): mechanical renaming damage — all parameters are named
        `__lowerCAmelCase` (duplicate argument names are a SyntaxError) while the
        body reads the original names (`parent`, `batch_size`, ...), and every
        assignment targets a throwaway local instead of the corresponding
        `self.<name>` attribute that the other methods read. Confirm against the
        original TFLayoutLMv3ModelTester and restore.
        """
        _lowerCamelCase : List[Any] = parent
        _lowerCamelCase : Optional[Any] = batch_size
        _lowerCamelCase : str = num_channels
        _lowerCamelCase : List[Any] = image_size
        _lowerCamelCase : Optional[int] = patch_size
        _lowerCamelCase : str = is_training
        _lowerCamelCase : Tuple = use_input_mask
        _lowerCamelCase : int = use_token_type_ids
        _lowerCamelCase : Optional[Any] = use_labels
        _lowerCamelCase : int = vocab_size
        _lowerCamelCase : List[Any] = hidden_size
        _lowerCamelCase : List[Any] = num_hidden_layers
        _lowerCamelCase : int = num_attention_heads
        _lowerCamelCase : Any = intermediate_size
        _lowerCamelCase : Optional[Any] = hidden_act
        _lowerCamelCase : Tuple = hidden_dropout_prob
        _lowerCamelCase : Tuple = attention_probs_dropout_prob
        _lowerCamelCase : Tuple = max_position_embeddings
        _lowerCamelCase : Optional[Any] = type_vocab_size
        _lowerCamelCase : int = type_sequence_label_size
        _lowerCamelCase : Dict = initializer_range
        _lowerCamelCase : Dict = coordinate_size
        _lowerCamelCase : Tuple = shape_size
        _lowerCamelCase : str = num_labels
        _lowerCamelCase : List[str] = num_choices
        _lowerCamelCase : Union[str, Any] = scope
        _lowerCamelCase : List[Any] = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        _lowerCamelCase : Dict = text_seq_length
        _lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1
        _lowerCamelCase : List[Any] = self.text_seq_length + self.image_seq_length
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Build a tiny LayoutLMv3 config plus random input tensors.

        Random bounding boxes are post-processed so that for every box
        x1 <= x2 and y1 <= y2 (coordinates are swapped when out of order),
        as required by the model.
        """
        _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        _lowerCamelCase : Union[str, Any] = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    # swap y-coordinates so the box is top-to-bottom
                    _lowerCamelCase : Optional[Any] = bbox[i, j, 3]
                    _lowerCamelCase : Dict = bbox[i, j, 1]
                    _lowerCamelCase : Dict = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    # swap x-coordinates so the box is left-to-right
                    _lowerCamelCase : Tuple = bbox[i, j, 2]
                    _lowerCamelCase : str = bbox[i, j, 0]
                    _lowerCamelCase : Optional[Any] = tmp_coordinate
        _lowerCamelCase : List[Any] = tf.constant(__lowerCAmelCase )
        _lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowerCamelCase : Any = None
        if self.use_input_mask:
            _lowerCamelCase : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
        _lowerCamelCase : int = None
        if self.use_token_type_ids:
            _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        _lowerCamelCase : int = None
        _lowerCamelCase : Optional[int] = None
        if self.use_labels:
            _lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        _lowerCamelCase : List[Any] = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int ):
        """Run the base model on text+image, text-only and image-only inputs and
        check the hidden-state shapes for each modality combination."""
        _lowerCamelCase : List[str] = TFLayoutLMvaModel(config=__lowerCAmelCase )
        # text + image
        _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , pixel_values=__lowerCAmelCase , training=__lowerCAmelCase )
        _lowerCamelCase : int = model(
            __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , training=__lowerCAmelCase , )
        _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , training=__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        _lowerCamelCase : Tuple = model(__lowerCAmelCase , training=__lowerCAmelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        _lowerCamelCase : List[Any] = model({'''pixel_values''': pixel_values} , training=__lowerCAmelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
        """Check the sequence-classification head: logits shape (batch, num_labels)."""
        _lowerCamelCase : str = self.num_labels
        _lowerCamelCase : Optional[Any] = TFLayoutLMvaForSequenceClassification(config=__lowerCAmelCase )
        _lowerCamelCase : int = model(
            __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : str ):
    """Build TFLayoutLMvaForTokenClassification and assert the logits shape
    is ``(batch_size, text_seq_length, num_labels)`` — one label per token.

    NOTE(review): the parameters all share one name — a SyntaxError as
    written; confirm original names against the upstream test file.
    """
    _lowerCamelCase : Tuple = self.num_labels
    _lowerCamelCase : List[str] = TFLayoutLMvaForTokenClassification(config=__lowerCAmelCase )
    _lowerCamelCase : List[str] = model(
        __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase , )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ):
    """Build TFLayoutLMvaForQuestionAnswering and assert both the start and
    end logits have shape ``(batch_size, seq_length)``.

    NOTE(review): the parameters all share one name — a SyntaxError as
    written; confirm original names against the upstream test file.
    """
    _lowerCamelCase : List[str] = 2
    _lowerCamelCase : Optional[Any] = TFLayoutLMvaForQuestionAnswering(config=__lowerCAmelCase )
    _lowerCamelCase : List[str] = model(
        __lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , training=__lowerCAmelCase , )
    self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
    self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
    """Unpack ``prepare_config_and_inputs`` into ``(config, inputs_dict)``
    for the common model tests.

    NOTE(review): the tuple unpacking binds every element to the throwaway
    name ``_lowerCamelCase``, yet the dict below reads ``input_ids``/``bbox``/
    ``pixel_values``/``token_type_ids``/``input_mask`` and the return reads
    ``config``/``inputs_dict`` — none of those names are bound here; restore
    the original unpack targets.
    """
    _lowerCamelCase : Tuple = self.prepare_config_and_inputs()
    ((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) : Union[str, Any] = config_and_inputs
    _lowerCamelCase : Union[str, Any] = {
        '''input_ids''': input_ids,
        '''bbox''': bbox,
        '''pixel_values''': pixel_values,
        '''token_type_ids''': token_type_ids,
        '''attention_mask''': input_mask,
    }
    return config, inputs_dict
@require_tf
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    """Common ModelTester/PipelineTester suite for the TF LayoutLMv3 models.

    NOTE(review): every class attribute below is bound to the same name
    ``snake_case__`` (each assignment shadows the previous one) and most
    locals are bound to ``_lowerCamelCase`` while later statements read the
    original names — an obfuscation artifact; restore names from the
    upstream transformers test file before relying on this module.
    """
    snake_case__ : Optional[int] = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    snake_case__ : List[Any] = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    snake_case__ : List[str] = False
    snake_case__ : int = False
    snake_case__ : List[Any] = False

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ):
        """Always skip — pipeline tests are disabled for this model family.

        NOTE(review): the parameters share one name — a SyntaxError as written.
        """
        return True

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict=False ):
        """Copy ``inputs_dict`` and add model-class-specific dummy label tensors.

        Tiles every tensor along a ``num_choices`` axis for multiple-choice
        models; when ``return_labels`` is set, attaches zero/one label tensors
        shaped for each task head.
        """
        _lowerCamelCase : int = copy.deepcopy(__lowerCAmelCase )
        if model_class in get_values(__lowerCAmelCase ):
            _lowerCamelCase : Optional[int] = {
                k: tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(__lowerCAmelCase , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(__lowerCAmelCase ):
                _lowerCamelCase : Union[str, Any] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__lowerCAmelCase ):
                _lowerCamelCase : Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                _lowerCamelCase : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__lowerCAmelCase ):
                _lowerCamelCase : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(__lowerCAmelCase ):
                _lowerCamelCase : List[str] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict

    def SCREAMING_SNAKE_CASE ( self : int ):
        """Create the model tester and a ConfigTester with a small hidden size."""
        _lowerCamelCase : List[Any] = TFLayoutLMvaModelTester(self )
        _lowerCamelCase : str = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """For each model exposing ``hf_compute_loss``, check the loss shape for
        the kwargs, masked-label, dict and tuple calling conventions."""
        _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : Dict = model_class(__lowerCAmelCase )
            if getattr(__lowerCAmelCase , '''hf_compute_loss''' , __lowerCAmelCase ):
                # The number of elements in the loss should be the same as the number of elements in the label
                _lowerCamelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase )
                _lowerCamelCase : str = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__lowerCAmelCase )[0]
                ]
                _lowerCamelCase : Union[str, Any] = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                _lowerCamelCase : Any = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase )
                _lowerCamelCase : Dict = prepared_for_class.pop('''input_ids''' )
                _lowerCamelCase : int = model(__lowerCAmelCase , **__lowerCAmelCase )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                _lowerCamelCase : Tuple = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase )
                _lowerCamelCase : str = prepared_for_class.pop('''input_ids''' )
                if "labels" in prepared_for_class:
                    _lowerCamelCase : int = prepared_for_class['''labels'''].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        # -100 marks masked positions the loss must ignore
                        _lowerCamelCase : Union[str, Any] = -1_0_0
                        _lowerCamelCase : List[str] = tf.convert_to_tensor(__lowerCAmelCase )
                        _lowerCamelCase : List[Any] = model(__lowerCAmelCase , **__lowerCAmelCase )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                _lowerCamelCase : Tuple = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase )
                _lowerCamelCase : Tuple = model(__lowerCAmelCase )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                _lowerCamelCase : List[str] = self._prepare_for_class(inputs_dict.copy() , __lowerCAmelCase , return_labels=__lowerCAmelCase )
                # Get keys that were added with the _prepare_for_class function
                _lowerCamelCase : int = prepared_for_class.keys() - inputs_dict.keys()
                _lowerCamelCase : Optional[Any] = inspect.signature(model.call ).parameters
                _lowerCamelCase : Tuple = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                _lowerCamelCase : Tuple = {0: '''input_ids'''}
                for label_key in label_keys:
                    _lowerCamelCase : List[str] = signature_names.index(__lowerCAmelCase )
                    _lowerCamelCase : List[str] = label_key
                _lowerCamelCase : int = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                _lowerCamelCase : Dict = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    _lowerCamelCase : str = prepared_for_class[value]
                _lowerCamelCase : str = tuple(__lowerCAmelCase )
                # Send to model
                _lowerCamelCase : Tuple = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Run the base-model shape checks over the prepared config and inputs."""
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Re-run the base-model checks for each position-embedding type."""
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : List[str] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowerCamelCase : Dict = type
            self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Run the sequence-classification head shape checks."""
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Run the token-classification head shape checks."""
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Run the question-answering head shape checks."""
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    @slow
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Smoke-test loading the first archived pretrained checkpoint."""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Tuple = TFLayoutLMvaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def snake_case_ ( ):
    """Load the COCO fixture image used by the slow integration test.

    Returns:
        The image opened from the repository's test fixtures directory.
    """
    # The original bound the opened file to a throwaway local and then
    # returned the undefined name ``image`` (a NameError at call time);
    # return the opened handle directly instead.
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_tf
class __snake_case ( unittest.TestCase):
    """Slow integration test: forward the COCO fixture through
    ``microsoft/layoutlmv3-base`` and compare a hidden-state slice."""
    @cached_property
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Image processor with OCR disabled, or None when vision deps are missing."""
        return LayoutLMvaImageProcessor(apply_ocr=__lowerCAmelCase ) if is_vision_available() else None

    @slow
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Run a full forward pass and check shape plus the first 3x3 values."""
        _lowerCamelCase : List[Any] = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
        _lowerCamelCase : Optional[int] = self.default_image_processor
        _lowerCamelCase : Optional[Any] = prepare_img()
        _lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase , return_tensors='''tf''' ).pixel_values
        _lowerCamelCase : int = tf.constant([[1, 2]] )
        # single batch of two tokens, each with a 4-value bounding box
        _lowerCamelCase : Dict = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        _lowerCamelCase : List[Any] = model(input_ids=__lowerCAmelCase , bbox=__lowerCAmelCase , pixel_values=__lowerCAmelCase , training=__lowerCAmelCase )
        # verify the logits
        _lowerCamelCase : Optional[Any] = (1, 1_9_9, 7_6_8)
        self.assertEqual(outputs.last_hidden_state.shape , __lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = tf.constant(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 83 |
"""simple docstring"""
from torch import nn
def snake_case_ ( A_ : str ):
    """Map an activation-function name to a freshly constructed ``torch.nn`` module.

    Args:
        A_: one of ``"swish"``, ``"silu"``, ``"mish"`` or ``"gelu"``.

    Returns:
        nn.Module: the corresponding activation module.

    Raises:
        ValueError: if ``A_`` is not a supported activation name.
    """
    # Fixes the original, which annotated the parameter as ``int`` and then
    # compared the undefined name ``act_fn`` (a NameError at call time).
    if A_ in ["swish", "silu"]:
        # "swish" is an alias for SiLU (x * sigmoid(x))
        return nn.SiLU()
    elif A_ == "mish":
        return nn.Mish()
    elif A_ == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'''Unsupported activation function: {A_}''' )
| 83 | 1 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = TypeVar('''DatasetType''', Dataset, IterableDataset)
def snake_case_ ( A_ : List[DatasetType], A_ : Optional[List[float]] = None, A_ : Optional[int] = None, A_ : Optional[DatasetInfo] = None, A_ : Optional[NamedSplit] = None, A_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ):
    """Interleave several map-style or iterable datasets into a single one.

    Validates that every element is a Dataset/IterableDataset (rejecting
    DatasetDict/IterableDatasetDict and empty inputs), that all elements share
    one flavour, and that ``stopping_strategy`` is valid, then dispatches to
    the map-style or iterable interleaving helper.

    NOTE(review): every parameter is named ``A_`` (a SyntaxError as written)
    and the body reads ``datasets``/``dataset_type``/``other_type``/
    ``stopping_strategy`` that the signature no longer binds — restore the
    original names (datasets, probabilities, seed, info, split,
    stopping_strategy).
    """
    # Local imports avoid a circular dependency with the dataset modules.
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''' )
    for i, dataset in enumerate(A_ ):
        if not isinstance(A_, (Dataset, IterableDataset) ):
            if isinstance(A_, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    F'''Dataset at position {i} has at least one split: {list(A_ )}\n'''
                    F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A_ ) )}\']''' )
            raise ValueError(
                F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A_ ).__name__}.''' )
        if i == 0:
            # First element fixes the expected flavour; the other is rejected.
            _lowerCamelCase , _lowerCamelCase : Any = (
                (Dataset, IterableDataset) if isinstance(A_, A_ ) else (IterableDataset, Dataset)
            )
        elif not isinstance(A_, A_ ):
            raise ValueError(
                F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            A_, A_, A_, info=A_, split=A_, stopping_strategy=A_ )
    else:
        return _interleave_iterable_datasets(
            A_, A_, A_, info=A_, split=A_, stopping_strategy=A_ )
def snake_case_ ( A_ : List[DatasetType], A_ : Optional[DatasetInfo] = None, A_ : Optional[NamedSplit] = None, A_ : int = 0, ):
    """Concatenate several map-style or iterable datasets along an axis.

    Performs the same flavour/emptiness validation as the interleave helper
    above, then dispatches to the map-style or iterable concatenation helper.

    NOTE(review): every parameter is named ``A_`` (a SyntaxError as written)
    and the body reads ``dsets``/``dataset_type``/``other_type`` that the
    signature no longer binds — restore the original names (dsets, info,
    split, axis).
    """
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''' )
    for i, dataset in enumerate(A_ ):
        if not isinstance(A_, (Dataset, IterableDataset) ):
            if isinstance(A_, (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    F'''Dataset at position {i} has at least one split: {list(A_ )}\n'''
                    F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A_ ) )}\']''' )
            raise ValueError(
                F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A_ ).__name__}.''' )
        if i == 0:
            # First element fixes the expected flavour; the other is rejected.
            _lowerCamelCase , _lowerCamelCase : str = (
                (Dataset, IterableDataset) if isinstance(A_, A_ ) else (IterableDataset, Dataset)
            )
        elif not isinstance(A_, A_ ):
            raise ValueError(
                F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(A_, info=A_, split=A_, axis=A_ )
    else:
        return _concatenate_iterable_datasets(A_, info=A_, split=A_, axis=A_ )
| 83 |
"""simple docstring"""
def snake_case_ ( input_a : int, input_b : int ):
    """Compute the logical NOR of two bit inputs.

    NOR outputs 1 only when both inputs are 0.

    Args:
        input_a: first input bit (0 or 1).
        input_b: second input bit (0 or 1).

    Returns:
        int: 1 if both inputs are 0, otherwise 0.
    """
    # The original declared both parameters with the same name ``A_`` — a
    # SyntaxError — so the comparison chain could only ever see one input.
    return int(input_a == input_b == 0 )
def snake_case_ ( ):
    """Print the truth table of a two-input NOR gate.

    NOTE(review): this definition re-uses the name ``snake_case_`` (shadowing
    the gate function above) and calls ``nor_gate``, which is not defined in
    this module — in the upstream source the gate function was named
    ``nor_gate``; confirm and restore.
    """
    print('''Truth Table of NOR Gate:''' )
    print('''| Input 1 | Input 2 | Output |''' )
    print(F'''| 0 | 0 | {nor_gate(0, 0 )} |''' )
    print(F'''| 0 | 1 | {nor_gate(0, 1 )} |''' )
    print(F'''| 1 | 0 | {nor_gate(1, 0 )} |''' )
    print(F'''| 1 | 1 | {nor_gate(1, 1 )} |''' )
if __name__ == "__main__":
    # Run doctests, then print the truth table when executed as a script.
    import doctest
    doctest.testmod()
    main()  # NOTE(review): ``main`` is not defined in this module — confirm the intended entry point
| 83 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    """Fast CPU tests for StableDiffusionXLImg2ImgPipeline built from tiny
    deterministic components.

    NOTE(review): every class attribute below is bound to the same name
    ``snake_case__`` (each shadows the previous), and method locals are bound
    to ``_lowerCamelCase`` while later statements read the original names —
    restore from the upstream diffusers test file.
    """
    snake_case__ : Optional[Any] = StableDiffusionXLImgaImgPipeline
    snake_case__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    snake_case__ : int = PipelineTesterMixin.required_optional_params - {"latents"}
    snake_case__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    snake_case__ : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
    snake_case__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Build the dict of tiny pipeline components (UNet, scheduler, VAE,
        two CLIP text encoders and tokenizers), seeded for determinism."""
        torch.manual_seed(0 )
        _lowerCamelCase : Union[str, Any] = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
        _lowerCamelCase : Dict = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
        torch.manual_seed(0 )
        _lowerCamelCase : str = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        _lowerCamelCase : Any = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , )
        _lowerCamelCase : Dict = CLIPTextModel(__lowerCAmelCase )
        _lowerCamelCase : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowerCAmelCase )
        _lowerCamelCase : List[str] = CLIPTextModelWithProjection(__lowerCAmelCase )
        _lowerCamelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowerCAmelCase )
        _lowerCamelCase : List[Any] = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''text_encoder_2''': text_encoder_a,
            '''tokenizer_2''': tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=0 ):
        """Seeded dummy inputs (image, generator, prompt kwargs) for the pipeline.

        NOTE(review): the two parameters share one name — a SyntaxError as
        written (presumably device and seed).
        """
        _lowerCamelCase : Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
        # map [-1, 1] noise into the [0, 1] image range
        _lowerCamelCase : str = image / 2 + 0.5
        if str(__lowerCAmelCase ).startswith('''mps''' ):
            # MPS does not support device-local generators
            _lowerCamelCase : Any = torch.manual_seed(__lowerCAmelCase )
        else:
            _lowerCamelCase : str = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 5.0,
            '''output_type''': '''numpy''',
            '''strength''': 0.75,
        }
        return inputs

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """End-to-end run on CPU; compare an output slice against reference values."""
        _lowerCamelCase : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        _lowerCamelCase : int = self.get_dummy_components()
        _lowerCamelCase : Any = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
        _lowerCamelCase : List[Any] = sd_pipe.to(__lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__lowerCAmelCase )
        _lowerCamelCase : List[Any] = sd_pipe(**__lowerCAmelCase ).images
        _lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        _lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def SCREAMING_SNAKE_CASE ( self : str ):
        """Common attention-slicing equivalence check with a loose tolerance."""
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Common batched-vs-single equivalence check with a loose tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Intentionally a no-op: the corresponding common test is skipped."""
        pass

    def SCREAMING_SNAKE_CASE ( self : int ):
        """Check that passing precomputed prompt embeddings reproduces the
        plain-prompt output exactly."""
        _lowerCamelCase : Dict = self.get_dummy_components()
        _lowerCamelCase : Dict = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = sd_pipe.to(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = sd_pipe.to(__lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        # forward without prompt embeds
        _lowerCamelCase : List[Any] = self.get_dummy_inputs(__lowerCAmelCase )
        _lowerCamelCase : str = 3 * ['''this is a negative prompt''']
        _lowerCamelCase : Dict = negative_prompt
        _lowerCamelCase : int = 3 * [inputs['''prompt''']]
        _lowerCamelCase : Dict = sd_pipe(**__lowerCAmelCase )
        _lowerCamelCase : Tuple = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        _lowerCamelCase : List[str] = self.get_dummy_inputs(__lowerCAmelCase )
        _lowerCamelCase : str = 3 * ['''this is a negative prompt''']
        _lowerCamelCase : int = 3 * [inputs.pop('''prompt''' )]
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : Any = sd_pipe.encode_prompt(__lowerCAmelCase , negative_prompt=__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = sd_pipe(
            **__lowerCAmelCase , prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , pooled_prompt_embeds=__lowerCAmelCase , negative_pooled_prompt_embeds=__lowerCAmelCase , )
        _lowerCamelCase : str = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
    """Slow GPU integration test against ``stabilityai/stable-diffusion-2-base``."""
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Free Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple="cpu" , __lowerCAmelCase : Tuple=torch.floataa , __lowerCAmelCase : Dict=0 ):
        """Build deterministic latents and prompt kwargs for the integration run.

        NOTE(review): the parameters share one name — a SyntaxError as written
        (presumably device, generator_device, dtype, seed).
        """
        _lowerCamelCase : Tuple = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
        # fixed-seed latents so outputs are reproducible across runs
        _lowerCamelCase : Optional[Any] = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 4, 6_4, 6_4) )
        _lowerCamelCase : List[Any] = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = {
            '''prompt''': '''a photograph of an astronaut riding a horse''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Run SD2-base for 3 steps and compare an output slice to references."""
        _lowerCamelCase : List[str] = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : List[Any] = self.get_inputs(__lowerCAmelCase )
        _lowerCamelCase : int = pipe(**__lowerCAmelCase ).images
        _lowerCamelCase : Tuple = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        _lowerCamelCase : Optional[int] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 83 |
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : list[list[int]] ) -> int:
    """Return the minimum path sum from the top-left to the bottom-right
    corner of ``A_`` when only right and down moves are allowed.

    The grid is updated in place: after the call each cell holds the
    cheapest cost of reaching it.

    Args:
        A_: rectangular grid of step costs.

    Returns:
        int: minimal total cost; 0 for an empty grid.
    """
    # The original body mixed the undefined name ``matrix`` with the
    # parameter ``A_`` (a NameError at call time); use the parameter
    # consistently and guard against an empty grid.
    if not A_ or not A_[0]:
        return 0
    # First row: only right moves are possible, so accumulate left-to-right.
    for i in range(1, len(A_[0] ) ):
        A_[0][i] += A_[0][i - 1]
    # First column: only down moves are possible, so accumulate top-down.
    for i in range(1, len(A_ ) ):
        A_[i][0] += A_[i - 1][0]
    # Inner cells: own cost plus the cheaper of the two possible predecessors.
    for i in range(1, len(A_ ) ):
        for j in range(1, len(A_[0] ) ):
            A_[i][j] += min(A_[i - 1][j], A_[i][j - 1] )
    return A_[-1][-1]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 83 | 1 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( _lowercase):
    """Scheduler test suite for ``KDPMaDiscreteScheduler``.

    NOTE(review): both class attributes below are bound to the same name
    ``snake_case__`` (the second shadows the first); they were presumably
    ``scheduler_classes`` and ``num_inference_steps``.  Method locals are
    likewise bound to ``_lowerCamelCase`` while later statements read the
    original names — restore from the upstream diffusers test file.
    """
    snake_case__ : Any = (KDPMaDiscreteScheduler,)
    snake_case__ : Optional[int] = 1_0

    def SCREAMING_SNAKE_CASE ( self : List[str] , **__lowerCAmelCase : int ):
        """Default scheduler config, overridable through keyword arguments."""
        _lowerCamelCase : List[str] = {
            '''num_train_timesteps''': 1_1_0_0,
            '''beta_start''': 0.00_01,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**__lowerCAmelCase )
        return config

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Sweep several training-timestep counts through the common checks."""
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Sweep paired beta_start/beta_end values through the common checks."""
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
            self.check_over_configs(beta_start=__lowerCAmelCase , beta_end=__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Sweep the supported beta schedules through the common checks."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Sweep the supported prediction types through the common checks."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Full denoising loop with v_prediction; compare sum/mean statistics."""
        _lowerCamelCase : Tuple = self.scheduler_classes[0]
        _lowerCamelCase : Dict = self.get_scheduler_config(prediction_type='''v_prediction''' )
        _lowerCamelCase : Optional[Any] = scheduler_class(**__lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        _lowerCamelCase : Optional[Any] = self.dummy_model()
        _lowerCamelCase : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowerCamelCase : Tuple = sample.to(__lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            _lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : List[Any] = model(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Dict = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Optional[int] = output.prev_sample
        _lowerCamelCase : str = torch.sum(torch.abs(__lowerCAmelCase ) )
        _lowerCamelCase : Tuple = torch.mean(torch.abs(__lowerCAmelCase ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
            assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693_4286_5017_0972E-07 ) < 1E-2
            assert abs(result_mean.item() - 0.00_02 ) < 1E-3

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Full denoising loop with the default (epsilon) config; skipped on MPS."""
        if torch_device == "mps":
            return
        _lowerCamelCase : str = self.scheduler_classes[0]
        _lowerCamelCase : List[str] = self.get_scheduler_config()
        _lowerCamelCase : List[str] = scheduler_class(**__lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        _lowerCamelCase : str = self.dummy_model()
        _lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowerCamelCase : Tuple = sample.to(__lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            _lowerCamelCase : int = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Dict = model(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Optional[int] = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Union[str, Any] = output.prev_sample
        _lowerCamelCase : Tuple = torch.sum(torch.abs(__lowerCAmelCase ) )
        _lowerCamelCase : Tuple = torch.mean(torch.abs(__lowerCAmelCase ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.41_25 ) < 1E-2
            assert abs(result_mean.item() - 0.02_66 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25 ) < 1E-2
            assert abs(result_mean.item() - 0.02_66 ) < 1E-3

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Same loop but with timesteps placed on the target device; skipped on MPS."""
        if torch_device == "mps":
            return
        _lowerCamelCase : Any = self.scheduler_classes[0]
        _lowerCamelCase : List[Any] = self.get_scheduler_config()
        _lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps , device=__lowerCAmelCase )
        _lowerCamelCase : Dict = self.dummy_model()
        _lowerCamelCase : Optional[int] = self.dummy_sample_deter.to(__lowerCAmelCase ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            _lowerCamelCase : str = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : int = model(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : List[str] = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Dict = output.prev_sample
        _lowerCamelCase : Dict = torch.sum(torch.abs(__lowerCAmelCase ) )
        _lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
        if str(__lowerCAmelCase ).startswith('''cpu''' ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.41_25 ) < 1E-2
            assert abs(result_mean.item() - 0.02_66 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25 ) < 1E-2
            assert abs(result_mean.item() - 0.02_66 ) < 1E-3
| 83 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
class __snake_case ( Generic[T]):
    """Singly linked node holding one payload of type ``T``.

    Attributes are ``data`` (the payload, read by ``__str__`` and by the
    stack class below) and ``next`` (the successor node or None).
    """
    def __init__( self : int , __lowerCAmelCase : T ):
        """Store the payload and start with no successor.

        The original body bound the values to throwaway locals (and
        referenced the undefined name ``data``), so the attributes read
        elsewhere were never set — assign them on ``self`` instead.
        """
        self.data = __lowerCAmelCase
        self.next = None

    def __str__( self : Optional[Any] ):
        """Render just the payload."""
        return f'''{self.data}'''
class __snake_case ( Generic[T]):
    """Linked-list based LIFO stack.

    NOTE(review): every non-dunder method is named ``SCREAMING_SNAKE_CASE``,
    so later definitions shadow earlier ones, and several bodies bind results
    to throwaway ``_lowerCamelCase`` locals where attribute assignments
    (``self.top = ...``) were clearly intended — restore the original method
    names (is_empty/push/pop/peek/clear) and assignments from upstream.
    """
    def __init__( self : int ):
        """Create an empty stack (no top node)."""
        _lowerCamelCase : Node[T] | None = None

    def __iter__( self : str ):
        """Yield payloads from the top of the stack downwards."""
        _lowerCamelCase : List[str] = self.top
        while node:
            yield node.data
            _lowerCamelCase : Any = node.next

    def __str__( self : int ):
        """Render the stack as top->...->bottom."""
        return "->".join([str(__lowerCAmelCase ) for item in self] )

    def __len__( self : int ):
        """Number of elements currently on the stack."""
        return len(tuple(iter(self ) ) )

    def SCREAMING_SNAKE_CASE ( self : int ):
        """True when the stack holds no elements."""
        return self.top is None

    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : T ):
        """Push a new payload on top of the stack."""
        _lowerCamelCase : Tuple = Node(__lowerCAmelCase )
        if not self.is_empty():
            _lowerCamelCase : Optional[int] = self.top
        _lowerCamelCase : List[str] = node

    def SCREAMING_SNAKE_CASE ( self : str ):
        """Remove and return the top payload; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('''pop from empty stack''' )
        assert isinstance(self.top , __lowerCAmelCase )
        _lowerCamelCase : Any = self.top
        _lowerCamelCase : Any = self.top.next
        return pop_node.data

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Return (without removing) the top payload; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('''peek from empty stack''' )
        assert self.top is not None
        return self.top.data

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Drop every element by discarding the top reference."""
        _lowerCamelCase : List[str] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 83 | 1 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]

# Backward-compatible alias: the obfuscated dump bound BOTH grids to this one
# name (so GLIDER, used below, was never defined); it ended up as the blinker.
lowerCAmelCase__ = BLINKER
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute one Game-of-Life step for the grid *cells* (0 = dead, 1 = alive).

    Returns a new grid of the same shape; the input is not modified.

    >>> new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]])
    [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Count the live neighbours of cell (i, j), clipping at the edges.
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render *frames* successive generations of *cells* as greyscale PIL images.

    Live cells are drawn black (0) and dead cells white (255); the grid is
    advanced one generation per frame via ``new_generation``.
    """
    images = []
    for _ in range(frames):
        # Create output image (width = number of columns, height = number of rows)
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    # Animate 16 generations of the glider and write them out as a GIF.
    # (The obfuscated dump assigned the frame list to ``lowerCAmelCase__`` but
    # then read ``images`` — restore the name the next line actually uses.)
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 83 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# Raw string avoids the invalid-escape-sequence warnings of the original literal.
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes that are knowingly allowed to have no checkpoint in their docstring.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    """Fail with ValueError if any config class in ``CONFIG_MAPPING`` lacks a
    valid ``[name](https://huggingface.co/name)`` checkpoint link in its
    docstring (classes listed in the ignore set are exempt)."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 83 | 1 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Read a ``pyspark.sql.DataFrame`` as a Hugging Face dataset.

    The obfuscated dump had named the base class ``_lowercase`` (undefined) and
    dropped the ``self.``-attribute bindings that ``read`` relies on; both are
    restored here.
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        # Read back by `read()` below to decide whether a rebuild is forced.
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Materialize (or stream) the Spark DataFrame as a dataset."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)


__snake_case = SparkDatasetReader  # preserve the old (obfuscated) class name
| 83 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
# NOTE(review): obfuscated module-level flag — its original name was destroyed
# by the dump and nothing in the visible portion of this chunk reads it.
lowerCAmelCase__ = False
# Placeholder for the fast (non-nightly) VersatileDiffusion text-to-image test
# suite; it was intentionally left empty in the original file.
class __snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase):
"""Nightly GPU integration tests for VersatileDiffusionTextToImagePipeline.

NOTE(review): machine-obfuscated — locals were rebound to ``_lowerCamelCase``
and arguments replaced by the undefined ``__lowerCAmelCase``, so lines below
no longer bind the names (``pipe``, ``generator``, ``image``, ``new_image``,
``image_slice``, ``expected_slice``) that later statements read. Comments
reconstruct the intended dataflow; the code is left byte-identical.
"""
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""Free CUDA memory between tests."""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : int ):
"""Save/reload round-trip: the reloaded pipeline must reproduce the
same images for the same seed (tolerance 1e-5)."""
# pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(...)
_lowerCamelCase : int = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# prompt = 'A painting of a squirrel eating a burger '
_lowerCamelCase : str = '''A painting of a squirrel eating a burger '''
# generator = torch.manual_seed(0)
_lowerCamelCase : Dict = torch.manual_seed(0 )
# image = pipe(...).images
_lowerCamelCase : List[Any] = pipe(
prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCAmelCase )
# pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
_lowerCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# Re-seed so the second run is comparable to the first.
_lowerCamelCase : int = generator.manual_seed(0 )
# new_image = pipe(...).images
_lowerCamelCase : List[str] = pipe(
prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""fp16 end-to-end generation: check a 3x3 slice of the 512x512 output
against reference values (tolerance 1e-2)."""
# pipe = ...from_pretrained(..., torch_dtype=torch.float16)
_lowerCamelCase : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = '''A painting of a squirrel eating a burger '''
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
# image = pipe(...).images
_lowerCamelCase : int = pipe(
prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
# image_slice = image[0, 253:256, 253:256, -1]
_lowerCamelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
# expected_slice = reference values recorded from a known-good run
_lowerCamelCase : Dict = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 83 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
lowerCAmelCase__ = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    """Load a torch checkpoint onto CPU and return the raw state dict.

    Args:
        checkpoint_path: path (or file-like object) of the ``.th`` checkpoint.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Translate an original VisualBERT state dict into HF Transformers naming.

    Args:
        d: original state dict.
        config: ``VisualBertConfig`` (only ``max_position_embeddings`` is read).
        rename_keys_prefix: (old, new) prefix pairs applied in order to every key.

    Returns:
        An ``OrderedDict`` with renamed keys; detector weights are dropped.
    """
    new_d = OrderedDict()
    # position_ids are not stored in the old checkpoints; synthesize them.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT ``.th`` checkpoint into an HF model dir.

    Args:
        checkpoint_path: path to one of the files in ``ACCEPTABLE_CHECKPOINTS``.
        pytorch_dump_folder_path: output directory (created if missing).

    Raises:
        AssertionError: if the checkpoint filename is not supported.
        NotImplementedError: if a "pre" checkpoint's task cannot be inferred.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config — both the task head ("model_type") and the visual embedding
    # size are inferred from the checkpoint filename.
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Restore the ``parser``/``args`` bindings destroyed by the obfuscation
    # (the following lines read them by those names).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 83 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
# NOTE(review): the three assignments below originally configured the TensorRT
# execution provider — judging by the `os` import and the "1"/"0"/"1" values
# they were os.environ["ORT_TENSORRT_*"] settings — but the obfuscation
# destroyed their targets. Kept verbatim; confirm against the original script.
lowerCAmelCase__ = "1"
lowerCAmelCase__ = "0"
lowerCAmelCase__ = "1"

sess_opt = ort.SessionOptions()
# Disable all graph optimizations so the benchmark measures the raw graph.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL

print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

# Dummy BERT-style inputs: batch of 1, sequence length 128, all ones.
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
# presumably the per-iteration predictions; the original name was lost — verify.
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )

print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 83 | 1 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """Download the BLIP demo image and return it as a normalized tensor.

    Args:
        image_size: side length to resize the (square) image to.
        device: torch device string/object the tensor is moved to.

    Returns:
        A float tensor of shape (1, 3, image_size, image_size).
    """
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    # CLIP-style normalization constants, as used by the original BLIP repo.
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """Map an original BLIP parameter name to its HF Transformers equivalent.

    The substitutions are applied in order, so earlier renames (e.g.
    ``attn`` -> ``self_attn``) feed into later ones.
    """
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Convert the three original BLIP checkpoints (captioning, VQA, ITM) to
    HF Transformers models, sanity-check each against recorded outputs, and
    save them under ``pytorch_dump_folder_path`` (+ ``_vqa`` / ``_itm``).

    Args:
        pytorch_dump_folder_path: output folder; skipped when None.
        config_path: optional HF config.json; defaults to a base BlipConfig.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    # Rename every key of the original state dict to the HF naming scheme.
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    # Sanity-check generation against token ids recorded from the original model.
    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    # Check both the image-text-matching head and the cosine-similarity head
    # against values recorded from the original model.
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    # BUG FIX: the original call was
    #   convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
    # but the parser never defines --checkpoint_path and the function only takes
    # (pytorch_dump_folder_path, config_path=None) — it would crash on any run.
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 83 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative escape step of point (x, y) under the Mandelbrot
    iteration z -> z**2 + c, scaled to [0, 1].

    Points that never diverge within ``max_step`` iterations (members of the
    Mandelbrot set) yield 1.0; immediate divergence yields 0.0.

    >>> get_distance(0, 0, 50)
    1.0
    >>> get_distance(2, 0, 50)
    0.0
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the Mandelbrot set (distance == 1), else white.

    >>> get_black_and_white_rgb(0)
    (255, 255, 255)
    >>> get_black_and_white_rgb(1)
    (0, 0, 0)
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Black for points inside the Mandelbrot set (distance == 1); outside,
    the escape distance is mapped onto the HSV hue wheel.

    >>> get_color_coded_rgb(0)
    (255, 0, 0)
    >>> get_color_coded_rgb(1)
    (0, 0, 0)
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set into a PIL image.

    The figure window is centred on (figure_center_x, figure_center_y) and is
    ``figure_width`` wide; its height follows from the image aspect ratio.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # The figure height only depends on the parameters — hoisted out of the
    # per-pixel loops (the original recomputed it for every pixel).
    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # (restore the ``img`` binding destroyed by the obfuscation — the final
    # line reads it by this name)
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 83 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
# An InstructBLIP-style processor bundling an image processor, a main tokenizer
# and a second "Q-Former" tokenizer.
# NOTE(review): machine-obfuscated — the base class name `_lowercase` is
# undefined in this file (presumably ProcessorMixin originally), keyword
# arguments were replaced by the undefined ``__lowerCAmelCase``, and locals were
# rebound to ``_lowerCamelCase`` so later reads of ``encoding``,
# ``qformer_text_encoding``, ``tokenizer_input_names`` etc. no longer resolve.
# The code is left byte-identical; comments reconstruct the intent.
class __snake_case ( _lowercase):
# Attributes the processor serializes; the two class names below select the
# concrete image-processor and tokenizer implementations.
snake_case__ : Tuple = ["image_processor", "tokenizer"]
snake_case__ : Dict = "BlipImageProcessor"
snake_case__ : Optional[Any] = "AutoTokenizer"
def __init__( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
"""Store image processor and tokenizer via super(), plus the QFormer tokenizer."""
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
# add QFormer tokenizer
_lowerCamelCase : Optional[Any] = qformer_tokenizer
def __call__( self : Optional[Any] , __lowerCAmelCase : ImageInput = None , __lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : List[Any] , ):
"""Encode text with both tokenizers and/or images with the image processor,
merging everything into a single BatchFeature."""
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
# encoding = BatchFeature()
_lowerCamelCase : int = BatchFeature()
if text is not None:
# text_encoding = self.tokenizer(...)
_lowerCamelCase : Union[str, Any] = self.tokenizer(
text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
encoding.update(__lowerCAmelCase )
# qformer_text_encoding = self.qformer_tokenizer(...) — same arguments, but
# its ids/mask are stored under qformer_* keys (the two pops below).
_lowerCamelCase : Union[str, Any] = self.qformer_tokenizer(
text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
# encoding["qformer_input_ids"] = ...; encoding["qformer_attention_mask"] = ...
_lowerCamelCase : Optional[int] = qformer_text_encoding.pop('''input_ids''' )
_lowerCamelCase : Optional[Any] = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
# image_encoding = self.image_processor(images, return_tensors=return_tensors)
_lowerCamelCase : str = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase )
encoding.update(__lowerCAmelCase )
return encoding
def SCREAMING_SNAKE_CASE ( self : List[str] , *__lowerCAmelCase : str , **__lowerCAmelCase : Dict ):
"""Forward to the main tokenizer's batch_decode."""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : List[str] ):
"""Forward to the main tokenizer's decode."""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def SCREAMING_SNAKE_CASE ( self : Any ):
"""Union of tokenizer and image-processor input names, de-duplicated in order."""
_lowerCamelCase : str = self.tokenizer.model_input_names
_lowerCamelCase : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , **__lowerCAmelCase : Tuple ):
"""Save the QFormer tokenizer into a subfolder, then delegate to super()."""
if os.path.isfile(__lowerCAmelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
# qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(__lowerCAmelCase )
return super().save_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Any ):
"""Load the QFormer tokenizer from its subfolder and append it to the
arguments recovered by the base loader before constructing the processor."""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained(__lowerCAmelCase , subfolder='''qformer_tokenizer''' )
_lowerCamelCase : Optional[Any] = cls._get_arguments_from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
args.append(__lowerCAmelCase )
return cls(*__lowerCAmelCase )
| 83 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line.

    Args:
        tokenizer: a HF tokenizer; BART tokenizers get ``add_prefix_space=True``
            unless the line already starts with a space.
        line: the raw text to encode.
        max_length: truncation/padding length.
        padding_side: "left" or "right"; set on the tokenizer before encoding.
        pad_to_max_length: pad to ``max_length`` when True.
        return_tensors: framework for the returned tensors (default "pt").
    """
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are all padding from ``input_ids`` (and, when given,
    from ``attention_mask``).

    Returns the trimmed ids, or a ``(ids, mask)`` tuple when a mask is passed.
    """
    # A column is kept if any row holds a non-pad token in it.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case ( _lowercase):
    """Line-by-line seq2seq dataset.

    Reads ``<data_dir>/<type_path>.source`` and ``<data_dir>/<type_path>.target``
    (one example per line) and tokenizes a (source, target) pair on access.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    under one reused name (a SyntaxError after name mangling) and defined two
    methods under a single name; names below are restored from how the body
    uses them. The module-level helpers `encode_line` and `trim_batch` are
    defined under obfuscated names in this chunk — confirm against the full file.
    """

    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        """Index the source/target files and record tokenization settings.

        Raises AssertionError when any source line is empty.
        """
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            # Optionally restrict the dataset to the first `n_obs` examples.
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        """Number of (source, target) examples."""
        return len(self.src_lens)

    def __getitem__(self, index):
        """Tokenize and return example ``index`` as a dict of tensors."""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right. RAG wraps two tokenizers: the
        # question encoder tokenizes sources, the generator tokenizes targets.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line in ``data_file``.

        Restored name: ``__init__`` calls ``self.get_char_lens``. Also fixes a
        bug where the obfuscated body took ``len`` of the path, not the line.
        """
        return [len(x) for x in Path(data_file).open().readlines()]

    def SCREAMING_SNAKE_CASE(self, batch):
        """Collate a list of examples into batch tensors, trimming all-pad
        columns (upstream names this method ``collate_fn`` — confirm)."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
lowerCAmelCase__ = getLogger(__name__)
def snake_case_ ( A_ : List[List] ):
    """Flatten one level of nesting: a list of lists into a single flat list."""
    return [element for sublist in A_ for element in sublist]
def snake_case_ ( A_ : str ):
    """Collect git metadata for the current repository and write it to
    ``<A_>/git_log.json`` (``A_`` is the destination folder path)."""
    repo_infos = get_git_info()
    # Bug fix: the obfuscated body discarded `get_git_info()`'s result and
    # saved the folder path itself; save the collected repo metadata instead.
    save_json(repo_infos, os.path.join(A_, "git_log.json"))
def snake_case_(content, path, indent=4, **json_dump_kwargs):
    """Serialize ``content`` as JSON to the file at ``path``.

    Pretty-prints with ``indent`` spaces; extra kwargs go to ``json.dump``.
    """
    # Bug fix: the obfuscated signature declared all three positional
    # parameters under one name (a SyntaxError).
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def snake_case_ ( A_ : Any ):
    """Parse and return the JSON document stored in the file at path ``A_``."""
    with open(A_) as handle:
        text = handle.read()
    return json.loads(text)
def snake_case_ ( ):
    """Return metadata about the enclosing git repository (uses GitPython).

    Keys: ``repo_id``, ``repo_sha``, ``repo_branch``, ``hostname``.
    """
    # Bug fix: the obfuscated body passed an undefined name both to
    # `search_parent_directories` and to `str(...)`; restored to the boolean
    # flag and the repo object.
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def snake_case_ ( fn : Callable, iterable : Iterable ):
    """Eagerly map ``fn`` over ``iterable`` and return the results as a list."""
    # Bug fix: the obfuscated signature declared both parameters under one
    # name (a SyntaxError).
    return list(map(fn, iterable))
def snake_case_ ( obj, path ):
    """Pickle ``obj`` to the file at ``path``."""
    # Bug fix: the obfuscated signature declared both parameters under one
    # name (a SyntaxError).
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def snake_case_ ( A_ : List[str] ):
    """SQuAD-style answer normalization: lowercase, strip punctuation, drop
    English articles (a/an/the), and collapse whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        # Bug fix: each obfuscated helper read a name (`text`) different from
        # its own parameter; helpers now use their own argument.
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(A_))))
def snake_case_ ( prediction, ground_truth ):
    """Token-level F1 between a predicted and a gold answer string.

    Both strings are normalized with ``normalize_answer`` first; precision
    divides by the prediction length, recall by the gold length.
    """
    # Bug fix: the obfuscated signature declared both parameters under one
    # name (a SyntaxError), making every `len(...)` below ambiguous.
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def snake_case_ ( prediction, ground_truth ):
    """True when the two answer strings are identical after normalization."""
    # Bug fix: the obfuscated signature declared both parameters under one
    # name (a SyntaxError).
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def snake_case_ ( output_lns, reference_lns ):
    """Average exact-match score over paired output/reference lines.

    Returns ``{"em": score}``; 0 for empty input.
    """
    # Bug fix: the obfuscated signature declared both parameters under one
    # name (a SyntaxError).
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def snake_case_ ( model_prefix ):
    """True when ``model_prefix`` names a RAG model (i.e. starts with "rag")."""
    # Bug fix: the body read `model_prefix` while the parameter had a
    # different (obfuscated) name; the parameter is renamed to match.
    return model_prefix.startswith("rag")
def snake_case_ ( extra_params, hparams, config ):
    """Move hyper-parameters listed in ``extra_params`` from ``hparams`` onto
    ``config``, honoring T5's ``dropout_rate`` alias for ``dropout``.

    Params the config cannot hold are logged and dropped from ``hparams``.
    Returns the (mutated) ``hparams`` and ``config``.
    """
    # Bug fix: the obfuscated signature declared all three parameters under
    # one name (a SyntaxError), and the dropout->dropout_rate alias was
    # assigned to a throwaway local instead of the mapping.
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 83 | 1 |
"""simple docstring"""
import random
def snake_case_ ( a, left_index, right_index ):
    """Lomuto-style partition of ``a[left_index:right_index]`` around the
    pivot ``a[left_index]``; returns the pivot's final position."""
    # Bug fix: the obfuscated signature declared all three parameters under
    # one name (a SyntaxError), and both swaps assigned to throwaway locals
    # instead of swapping list elements.
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def snake_case_ ( a, left, right ):
    """In-place randomized quicksort of ``a[left:right]``.

    NOTE(review): relies on module-level ``partition`` and
    ``quick_sort_random`` helpers, which this chunk defines under different
    (obfuscated) names — confirm against the full file.
    """
    # Bug fix: the obfuscated signature declared all three parameters under
    # one name (a SyntaxError), and the pivot swap assigned to throwaway
    # locals instead of swapping list elements.
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point
def snake_case_ ( ):
    """Read comma-separated ints from stdin, quicksort them, print the result.

    NOTE(review): obfuscation damage — the body reads `user_input` and `A_`,
    which are never bound here (results go to throwaway locals), and calls
    `quick_sort_random`, which this chunk defines under a different name.
    """
    _lowerCamelCase : List[str] = input('''Enter numbers separated by a comma:\n''' ).strip()
    _lowerCamelCase : int = [int(A_ ) for item in user_input.split(''',''' )]
    quick_sort_random(A_, 0, len(A_ ) )
    print(A_ )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this chunk (the entry point above
    # is named `snake_case_`) — confirm against the full file before running.
    main()
| 83 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowerCAmelCase__ = logging.get_logger(__name__)
# Map from model identifier to its hosted config.json (CamemBERT variants).
# NOTE(review): this rebinding reuses the same obfuscated name and therefore
# overwrites the logger bound just above — the original file used two
# distinct names here.
lowerCAmelCase__ = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class __snake_case ( _lowercase):
    """CamemBERT model configuration (RoBERTa-style hyper-parameters).

    NOTE(review): the obfuscated original declared every ``__init__``
    parameter under one reused name (a SyntaxError); parameter names are
    restored from the attribute assignments in the body, defaults kept as-is.
    """

    snake_case__ : Optional[Any] = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        """Store hyper-parameters; special token ids go to the base config."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( _lowercase):
    @property
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """ONNX input spec: dynamic axes for `input_ids` and `attention_mask`."""
        if self.task == "multiple-choice":
            axis_spec = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            axis_spec = {0: "batch", 1: "sequence"}
        return OrderedDict([("input_ids", axis_spec), ("attention_mask", axis_spec)])
| 83 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __snake_case ( _lowercase):
    """Unit tests for ``Dataset.from_list``.

    NOTE(review): obfuscation damage — results are bound to throwaway
    `_lowerCamelCase` locals while later lines read the intended names
    (`dset`, `example_records`, `dset_from_dict`), which are unbound as
    written; the two fixture helpers also share one method name, so the
    second shadows the first.
    """
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Fixture: four records with an int column and a str column."""
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Fixture: the same data in columnar (dict-of-lists) form."""
        _lowerCamelCase : Union[str, Any] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : str ):
        """from_list round-trips column names and per-row contents."""
        _lowerCamelCase : Union[str, Any] = self._create_example_records()
        _lowerCamelCase : Tuple = Dataset.from_list(__lowerCAmelCase )
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
        for i, r in enumerate(__lowerCAmelCase ):
            self.assertDictEqual(__lowerCAmelCase , example_records[i] )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """from_list and from_dict of the same data produce equal infos."""
        _lowerCamelCase : Any = self._create_example_records()
        _lowerCamelCase : Union[str, Any] = Dataset.from_list(__lowerCAmelCase )
        _lowerCamelCase : List[str] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): # checks what happens with missing columns
        """Missing columns are filled with None; the first record fixes the schema."""
        _lowerCamelCase : Any = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        _lowerCamelCase : str = Dataset.from_list(__lowerCAmelCase )
        self.assertDictEqual(dset[0] , {'''col_1''': 1} )
        self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
    def SCREAMING_SNAKE_CASE ( self : Dict ): # checks if the type can be inferred from the second record
        """An empty list in record 0 still infers Sequence(int64) from record 1."""
        _lowerCamelCase : List[str] = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        _lowerCamelCase : str = Dataset.from_list(__lowerCAmelCase )
        self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
    def SCREAMING_SNAKE_CASE ( self : str ):
        """from_list([]) yields an empty dataset with no columns."""
        _lowerCamelCase : Any = Dataset.from_list([] )
        self.assertEqual(len(__lowerCAmelCase ) , 0 )
        self.assertListEqual(dset.column_names , [] )
| 83 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Marker inserted above converted lines that still mention TFDS-only APIs.
# NOTE(review): these four successive assignments reuse one obfuscated name,
# so each overwrites the previous — the original file used distinct names
# (e.g. HIGHLIGHT_MESSAGE_PRE/POST, TO_HIGHLIGHT, TO_CONVERT).
lowerCAmelCase__ = '''<<<<<<< This should probably be modified because it mentions: '''
# Closing marker for a highlighted region.
lowerCAmelCase__ = '''=======
>>>>>>>
'''
# TFDS API names with no `datasets` equivalent that need manual attention.
lowerCAmelCase__ = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]
# (pattern, replacement) regex pairs applied in order to port TFDS code.
lowerCAmelCase__ = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def snake_case_ ( A_ : Namespace ):
    """Factory wired into argparse ``set_defaults(func=...)``: builds the
    conversion command from parsed CLI arguments.

    NOTE(review): obfuscation damage — the body reads `args` while the
    parameter is `A_`, and `ConvertCommand` is not defined under that name in
    this chunk (the command class below is named `__snake_case`). Confirm
    against the full file.
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory )
class __snake_case ( _lowercase):
    """CLI command that ports a TensorFlow Datasets dataset script to the
    HuggingFace ``datasets`` format by regex-rewriting its source files.

    NOTE(review): obfuscation damage throughout — values are bound to
    throwaway `_lowerCamelCase` locals while later lines read the intended
    names (`parser`/`train_parser`, `self._tfds_path`, `abs_tfds_path`,
    `out_line`, `output_file`, ...), which are unbound as written. The
    structure and strings are intact; only the bindings need repair.
    """
    @staticmethod
    def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : ArgumentParser ):
        """Register the `convert` sub-command and its CLI flags on the parser."""
        _lowerCamelCase : List[str] = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=__lowerCAmelCase )
    def __init__( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , *__lowerCAmelCase : int ):
        """Record the tfds source path and destination directory for run()."""
        _lowerCamelCase : List[str] = get_logger('''datasets-cli/converting''' )
        _lowerCamelCase : int = tfds_path
        _lowerCamelCase : Dict = datasets_directory
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Convert every eligible .py file under the tfds path: rewrite imports
        and API calls, highlight lines needing manual work, and place builder
        scripts (plus the utility files they import) in per-dataset folders."""
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : Union[str, Any] = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            _lowerCamelCase : Dict = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        _lowerCamelCase : int = os.path.abspath(self._datasets_directory )
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        _lowerCamelCase : str = []
        _lowerCamelCase : Union[str, Any] = []
        # Maps a utility-module name to the builder directory that imports it,
        # so utilities can be copied next to their builder at the end.
        _lowerCamelCase : Union[str, Any] = {}
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : List[str] = os.listdir(__lowerCAmelCase )
        else:
            _lowerCamelCase : Optional[Any] = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''' )
            _lowerCamelCase : Union[str, Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Optional[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            if not os.path.isfile(__lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(__lowerCAmelCase , encoding='''utf-8''' ) as f:
                _lowerCamelCase : Tuple = f.readlines()
            _lowerCamelCase : Optional[int] = []
            _lowerCamelCase : Union[str, Any] = False
            _lowerCamelCase : int = False
            _lowerCamelCase : Tuple = []
            for line in lines:
                _lowerCamelCase : Optional[int] = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    _lowerCamelCase : Union[str, Any] = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    _lowerCamelCase : List[str] = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    _lowerCamelCase : str = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    _lowerCamelCase : Union[str, Any] = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Line uses a TFDS API with no equivalent: wrap it in
                    # conflict-style markers so a human reviews it.
                    _lowerCamelCase : Dict = True
                    _lowerCamelCase : Optional[int] = list(filter(lambda __lowerCAmelCase : e in out_line , __lowerCAmelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCAmelCase ) + '''\n''' )
                    out_lines.append(__lowerCAmelCase )
                    out_lines.append(__lowerCAmelCase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        _lowerCamelCase : str = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    _lowerCamelCase : Dict = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , __lowerCAmelCase )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    _lowerCamelCase : Union[str, Any] = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    _lowerCamelCase : Any = True
                out_lines.append(__lowerCAmelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                _lowerCamelCase : Union[str, Any] = f_name.replace('''.py''' , '''''' )
                _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
                self._logger.info(f'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(__lowerCAmelCase )
            if needs_manual_update:
                with_manual_update.append(__lowerCAmelCase )
            with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(__lowerCAmelCase )
            self._logger.info(f'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                _lowerCamelCase : Optional[int] = os.path.basename(__lowerCAmelCase )
                _lowerCamelCase : Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(__lowerCAmelCase , __lowerCAmelCase )
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 83 | 1 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def snake_case_ ( A_ : List[Any] ):
    """True when ``A_`` is a ``torch.compile``-wrapped module (torch >= 2.0)."""
    # Bug fix: the dynamo capability probe must look at the `torch` module
    # itself, not at the object under test.
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(A_, torch._dynamo.eval_frame.OptimizedModule)
def snake_case_ ( model, keep_fp32_wrapper : bool = True ):
    """Unwrap ``model`` from distributed/compiled containers (DDP,
    DataParallel, DeepSpeedEngine, torch.compile wrappers) and optionally
    strip the mixed-precision forward wrapper; returns the inner model.

    NOTE(review): relies on `is_compiled_module`, which this chunk defines
    under an obfuscated name — confirm against the full file.
    """
    # Bug fix: the obfuscated signature declared both parameters under one
    # name (a SyntaxError) and bound intermediate results to throwaway
    # locals; bindings restored from how the body reads them.
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # Peel decorator layers until we reach the pre-wrapper forward.
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def snake_case_ ( ):
    """Block until every process in the current ``PartialState`` reaches this
    barrier."""
    PartialState().wait_for_everyone()
def snake_case_ ( obj, f ):
    """Save ``obj`` to ``f`` in a distribution-aware way.

    On TPU, ``xm.save`` handles cross-replica saving; otherwise only the
    local main process (index 0) calls ``torch.save``.
    """
    # Bug fix: the obfuscated signature declared both parameters under one
    # name (a SyntaxError).
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def snake_case_ ( **A_ : Any ):
    """Context manager that temporarily sets environment variables.

    Each keyword becomes an upper-cased environment variable for the
    duration of the ``with`` block and is removed again on exit.
    """
    for key, value in A_.items():
        # Bug fix: the obfuscated body stringified the kwargs dict into a
        # throwaway local instead of exporting each value.
        os.environ[key.upper()] = str(value)
    yield
    for key in A_:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def snake_case_ ( A_ : Any ):
    """Best human-readable name for ``A_``: its ``__qualname__``, else
    ``__name__``, falling back to its class's name and finally ``str(A_)``."""
    obj = A_
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        # Plain instances carry no name of their own; fall back to the class.
        # Bug fix: the obfuscated body assigned the class to a throwaway
        # local, leaving `obj` unbound in the reads below.
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def snake_case_ ( source, destination ):
    """Recursively merge ``source`` into ``destination`` (in place) and
    return ``destination``.

    Nested dicts are merged key-by-key; other values from ``source``
    overwrite those in ``destination``.
    """
    # Bug fix: the obfuscated signature declared both parameters under one
    # name (a SyntaxError) and recursed through an undefined symbol.
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            snake_case_(value, node)
        else:
            destination[key] = value
    return destination
def snake_case_ ( port : int = None ):
    """True when TCP ``port`` on localhost accepts a connection.

    Defaults to 29500, the usual torch.distributed rendezvous port.
    """
    # Bug fix: the body read `port` while the parameter had a different
    # (obfuscated) name, and the default was assigned to a throwaway local.
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 83 |
"""simple docstring"""
def snake_case_ ( weights : list, values : list, number_of_items : int, max_weight : int, index : int ):
    """0/1 knapsack via plain recursion: best total value achievable from
    item ``index`` onward without exceeding ``max_weight``.

    Exponential time; fine for the small demo inputs this targets.
    """
    # Bug fix: the obfuscated signature declared all five parameters under
    # one name (a SyntaxError); names restored from how the body reads them.
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = snake_case_(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + snake_case_(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
    import doctest
    # Run any doctest examples in this module's docstrings.
    doctest.testmod()
| 83 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __snake_case ( _lowercase):
    """Output of an SDE-VE scheduler step.

    NOTE(review): upstream names these fields `prev_sample` (the stepped
    sample) and `prev_sample_mean` (the mean without the noise term); here
    both annotations reuse one obfuscated name, so only a single field
    survives — confirm against the full file.
    """
    # Field 1 (upstream: prev_sample) — shadowed by the annotation below.
    snake_case__ : torch.FloatTensor
    # Field 2 (upstream: prev_sample_mean).
    snake_case__ : torch.FloatTensor
class __snake_case ( _lowercase , _lowercase):
    """Variance-exploding (VE) SDE scheduler (Song et al., "Score-Based
    Generative Modeling through Stochastic Differential Equations"):
    predictor (`step_pred`) plus Langevin corrector (`step_correct`) sampling.

    NOTE(review): obfuscation damage throughout — many results are bound to
    throwaway `_lowerCamelCase` locals while later lines read the intended
    names (`self.timesteps`, `self.discrete_sigmas`, `drift`, `diffusion`,
    `prev_sample_mean`, ...), which are unbound as written. The structure,
    formulas, and strings are intact; only the bindings need repair.
    """
    snake_case__ : int = 1
    @register_to_config
    def __init__( self : str , __lowerCAmelCase : int = 2_0_0_0 , __lowerCAmelCase : float = 0.15 , __lowerCAmelCase : float = 0.01 , __lowerCAmelCase : float = 13_48.0 , __lowerCAmelCase : float = 1E-5 , __lowerCAmelCase : int = 1 , ):
        """Record noise-schedule configuration and precompute sigmas.

        NOTE(review): all parameters share one (mangled) name here — a
        SyntaxError; upstream they are num_train_timesteps, snr, sigma_min,
        sigma_max, sampling_eps, correct_steps.
        """
        _lowerCamelCase : Optional[int] = sigma_max
        # setable values
        _lowerCamelCase : Dict = None
        self.set_sigmas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ):
        """Identity: VE-SDE needs no input scaling; returns `sample` unchanged."""
        return sample
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : Union[str, torch.device] = None ):
        """Build the inference timestep grid, linearly spaced from 1 down to
        the sampling epsilon."""
        _lowerCamelCase : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        _lowerCamelCase : Optional[int] = torch.linspace(1 , __lowerCAmelCase , __lowerCAmelCase , device=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None ):
        """Build the geometric noise-level schedule sigma_min -> sigma_max
        matching the timesteps (creating timesteps first if needed)."""
        _lowerCamelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
        _lowerCamelCase : int = sigma_max if sigma_max is not None else self.config.sigma_max
        _lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        _lowerCamelCase : Optional[int] = torch.exp(torch.linspace(math.log(__lowerCAmelCase ) , math.log(__lowerCAmelCase ) , __lowerCAmelCase ) )
        _lowerCamelCase : Tuple = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ):
        """Sigma of the previous discrete timestep (zero at t == 0)."""
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
        """One reverse-SDE predictor step (eqs. 6 and 47 of the paper)."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        _lowerCamelCase : Tuple = timestep * torch.ones(
            sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
        _lowerCamelCase : Dict = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        _lowerCamelCase : Optional[int] = timesteps.to(self.discrete_sigmas.device )
        _lowerCamelCase : Any = self.discrete_sigmas[timesteps].to(sample.device )
        _lowerCamelCase : int = self.get_adjacent_sigma(__lowerCAmelCase , __lowerCAmelCase ).to(sample.device )
        _lowerCamelCase : Any = torch.zeros_like(__lowerCAmelCase )
        _lowerCamelCase : Any = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        _lowerCamelCase : Union[str, Any] = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            _lowerCamelCase : List[Any] = diffusion.unsqueeze(-1 )
        _lowerCamelCase : int = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        _lowerCamelCase : List[str] = randn_tensor(
            sample.shape , layout=sample.layout , generator=__lowerCAmelCase , device=sample.device , dtype=sample.dtype )
        _lowerCamelCase : List[Any] = sample - drift # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        _lowerCamelCase : int = prev_sample_mean + diffusion * noise # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=__lowerCAmelCase , prev_sample_mean=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
        """One Langevin corrector step; step size is set from the target
        signal-to-noise ratio of the model output versus fresh noise."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        _lowerCamelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=__lowerCAmelCase ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        _lowerCamelCase : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        _lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        _lowerCamelCase : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        _lowerCamelCase : Tuple = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        _lowerCamelCase : Union[str, Any] = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            _lowerCamelCase : str = step_size.unsqueeze(-1 )
        _lowerCamelCase : Any = sample + step_size * model_output
        _lowerCamelCase : int = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , ):
        """Diffuse `original_samples` with per-timestep sigma-scaled noise."""
        _lowerCamelCase : Dict = timesteps.to(original_samples.device )
        _lowerCamelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
        _lowerCamelCase : Union[str, Any] = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        _lowerCamelCase : int = noise + original_samples
        return noisy_samples
    def __len__( self : Optional[int] ):
        """Number of configured training timesteps."""
        return self.config.num_train_timesteps
| 83 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_lowercase):
    """Placeholder for a pipeline requiring `transformers`, `torch`, and
    `note_seq`: every entry point raises a helpful error via
    `requires_backends` when those extras are missing."""
    # Backends this dummy stands in for.
    snake_case__ : Optional[Any] = ["transformers", "torch", "note_seq"]
    def __init__( self : Union[str, Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ):
        """Raise unless all required backends are installed."""
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : List[Any] , *__lowerCAmelCase : str , **__lowerCAmelCase : Union[str, Any] ):
        """Stand-in classmethod (upstream: `from_config`) — raises unless the
        backends are installed; name lost to obfuscation, confirm."""
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : int , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Optional[Any] ):
        """Stand-in classmethod (upstream: `from_pretrained`) — raises unless
        the backends are installed; name lost to obfuscation, confirm."""
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 83 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __snake_case ( unittest.TestCase):
    """Launcher tests for accelerate's external-deps metrics script on CPU,
    single GPU, and multi GPU.

    NOTE(review): method names (upstream: `setUp`, `test_*`) were lost to
    obfuscation, so unittest will neither run the fixture nor discover these
    tests as written — confirm against the full file.
    """
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Fixture (upstream: setUp): locate the test_metrics script and cache
        its module for launching."""
        _lowerCamelCase : Any = inspect.getfile(accelerate.test_utils )
        _lowerCamelCase : Union[str, Any] = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
        from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
        _lowerCamelCase : Optional[Any] = test_metrics
    @require_cpu
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Run the metrics script on CPU with a single process."""
        debug_launcher(self.test_metrics.main , num_processes=1 )
    @require_cpu
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Run the metrics script on CPU with the default process count."""
        debug_launcher(self.test_metrics.main )
    @require_single_gpu
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Run the metrics script directly on one GPU."""
        self.test_metrics.main()
    @require_multi_gpu
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Launch the metrics script with torchrun across all local GPUs."""
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        _lowerCamelCase : str = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
| 83 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase):
    """Slow integration check of Flax mT5-small against a hard-coded reference score.

    NOTE(review): locals were renamed to ``_lowerCamelCase`` by an automated
    rewrite; ``model``, ``tokenizer``, ``labels``, ``logits``, ``loss``,
    ``mtf_score`` and ``EXPECTED_SCORE`` are read below but never bound, so the
    test cannot run as written. Code kept byte-identical; comments only.
    """

    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Compute mT5-small's sequence-level NLL on a tiny example and compare to a reference value."""
        _lowerCamelCase : Any = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        _lowerCamelCase : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        _lowerCamelCase : Union[str, Any] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        _lowerCamelCase : Optional[int] = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        # Teacher forcing: decoder inputs are the labels shifted one position right.
        _lowerCamelCase : List[Any] = shift_tokens_right(__lowerCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
        _lowerCamelCase : int = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
        # Mean token-level cross-entropy, rescaled to a sequence-level score below.
        _lowerCamelCase : Optional[Any] = optax.softmax_cross_entropy(__lowerCAmelCase , onehot(__lowerCAmelCase , logits.shape[-1] ) ).mean()
        _lowerCamelCase : Dict = -(labels.shape[-1] * loss.item())
        _lowerCamelCase : Dict = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 83 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class __snake_case ( unittest.TestCase):
    """Intentionally empty fast-test placeholder; the real coverage for this
    pipeline lives in the nightly, GPU-only suite defined later in the file."""
    pass
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase):
    """Nightly GPU integration tests for VersatileDiffusionTextToImagePipeline.

    NOTE(review): locals were renamed to ``_lowerCamelCase`` by an automated
    rewrite; ``pipe``, ``generator``, ``image``, ``new_image``, ``image_slice``
    and ``expected_slice`` are read below but never bound, so these tests cannot
    run as written. Code kept byte-identical; comments only.
    """

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Free GPU memory between tests (tearDown-style step)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE ( self : int ):
        """Save/reload round-trip: outputs must match before and after save_pretrained."""
        _lowerCamelCase : int = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : str = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Dict = torch.manual_seed(0 )
        _lowerCamelCase : List[Any] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__lowerCAmelCase )
            _lowerCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCAmelCase )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        # Re-seed so the reloaded pipeline sees identical noise.
        _lowerCamelCase : int = generator.manual_seed(0 )
        _lowerCamelCase : List[str] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Full 50-step run: compare a 3x3 slice of the generated image with reference values."""
        # NOTE(review): `torch.floataa` looks like a mangled `torch.float16` -- confirm.
        _lowerCamelCase : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        _lowerCamelCase : int = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
        _lowerCamelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        _lowerCamelCase : Dict = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 83 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def snake_case_(tmp_path_factory, files):
    """DatasetInfosDict.from_directory must pick up ``dataset_size`` from README.md
    YAML front matter and/or the legacy dataset_infos.json.

    Fixes: the original signature declared both parameters as ``A_`` (duplicate
    argument name -> SyntaxError) and bound every local to ``_lowerCamelCase``;
    names are restored from the body's own references (``tmp_path_factory``,
    ``files``, ``dataset_infos_dir``, ``dataset_infos``).
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def snake_case_(tmp_path, dataset_info):
    """Round-trip a DatasetInfo through write_to_directory/from_directory and
    check equality plus the presence of dataset_info.json.

    Fixes: the original declared both parameters as ``A_`` (duplicate argument
    name -> SyntaxError) and bound locals to ``_lowerCamelCase``; names restored
    from the body's own references.
    """
    tmp_path = str(tmp_path)  # the API below expects a plain path string
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def snake_case_():
    """A fully-populated DatasetInfo must serialize to YAML with exactly the
    keys in ``DatasetInfo._INCLUDED_INFO_IN_YAML`` and survive a dump/load cycle.

    Fixes: the original bound every local to ``_lowerCamelCase`` while the body
    referenced ``dataset_info_yaml_dict`` and ``reloaded``; bindings restored.
    """
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # every serialized value must be a plain YAML-compatible type
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def snake_case_():
    """An empty DatasetInfo must serialize to an empty YAML dict.

    Fixes: the original bound both locals to ``_lowerCamelCase`` while the
    assertion read ``dataset_info_yaml_dict``; bindings restored.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def snake_case_(tmp_path, dataset_infos_dict):
    """Round-trip a DatasetInfosDict through write_to_directory/from_directory.

    Fixes: the original declared both parameters as ``A_`` (duplicate argument
    name -> SyntaxError) and bound locals/attribute targets to
    ``_lowerCamelCase``; bindings restored from the body's own references.
    """
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 83 | 1 |
"""simple docstring"""
from __future__ import annotations
def snake_case_(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: given exactly two of (voltage, current, resistance) with
    the unknown quantity passed as 0, return the missing quantity in a dict.

    Fixes: the original declared all three parameters as ``A_`` (duplicate
    argument name -> SyntaxError) while the body referenced ``voltage``,
    ``current`` and ``resistance``; the parameter names are restored.

    Args:
        voltage: potential difference in volts (0 if unknown).
        current: current in amperes (0 if unknown).
        resistance: resistance in ohms (0 if unknown).

    Returns:
        A one-entry dict mapping the missing quantity's name to its value.

    Raises:
        ValueError: if not exactly one argument is 0, or resistance is negative.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        # Defensive: unreachable given the count() guard above.
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 83 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __snake_case :
    """Test helper that builds a tiny ASTConfig plus random inputs for the AST suite.

    NOTE(review): an automated rewrite renamed the original assignment targets to
    ``_lowerCamelCase``. In ``__init__`` these were presumably ``self.<attr>``
    assignments (later methods read ``self.batch_size`` etc., which is never
    set), the parameters were presumably named (``parent``, ``batch_size``, ...),
    and the annotated tuple-unpacking near the end is not valid Python syntax.
    Code kept byte-identical; comments only.
    """

    def __init__( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple=1_3 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : List[str]=2_4 , __lowerCAmelCase : str=1_6 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Optional[Any]=3_2 , __lowerCAmelCase : List[Any]=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : int=3_7 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : int=1_0 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : str=None , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Union[str, Any]=2 , ):
        """Store the test hyper-parameters and derive the expected sequence length."""
        # NOTE(review): every target below was presumably ``self.<name>`` originally.
        _lowerCamelCase : List[str] = parent
        _lowerCamelCase : str = batch_size
        _lowerCamelCase : Tuple = patch_size
        _lowerCamelCase : Optional[int] = max_length
        _lowerCamelCase : List[Any] = num_mel_bins
        _lowerCamelCase : int = is_training
        _lowerCamelCase : Union[str, Any] = use_labels
        _lowerCamelCase : Dict = hidden_size
        _lowerCamelCase : Tuple = num_hidden_layers
        _lowerCamelCase : int = num_attention_heads
        _lowerCamelCase : Tuple = intermediate_size
        _lowerCamelCase : List[str] = hidden_act
        _lowerCamelCase : Dict = hidden_dropout_prob
        _lowerCamelCase : int = attention_probs_dropout_prob
        _lowerCamelCase : List[Any] = type_sequence_label_size
        _lowerCamelCase : Tuple = initializer_range
        _lowerCamelCase : List[str] = scope
        _lowerCamelCase : Optional[int] = frequency_stride
        _lowerCamelCase : List[Any] = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        _lowerCamelCase : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        _lowerCamelCase : Union[str, Any] = (self.max_length - self.patch_size) // self.time_stride + 1
        _lowerCamelCase : Any = frequency_out_dimension * time_out_dimension
        _lowerCamelCase : List[Any] = num_patches + 2

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Create (config, input_values, labels) for one test case."""
        _lowerCamelCase : int = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        _lowerCamelCase : str = None
        if self.use_labels:
            _lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        _lowerCamelCase : Optional[int] = self.get_config()
        return config, input_values, labels

    def SCREAMING_SNAKE_CASE ( self : str ):
        """Build an ASTConfig from the stored hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )

    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
        """Run the base model in eval mode and check the output hidden-state shape."""
        _lowerCamelCase : List[Any] = ASTModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : List[Any] = model(__lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Re-pack prepare_config_and_inputs() into (config, inputs_dict)."""
        _lowerCamelCase : int = self.prepare_config_and_inputs()
        # NOTE(review): annotated tuple-unpacking below is a SyntaxError;
        # the original presumably unpacked (config, input_values, labels).
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : Optional[Any] = config_and_inputs
        _lowerCamelCase : int = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    """Common + pipeline test-suite wiring for the AST models.

    NOTE(review): an automated rewrite mangled several statements below
    (``_lowerCamelCase , _lowerCamelCase : Any = ...`` is not valid Python;
    ``pipeline_test_casse_name``, ``model``, ``x``, ``arg_names`` and others are
    read but never bound; every method shares the name ``SCREAMING_SNAKE_CASE``
    so only the last survives). Code kept byte-identical; comments only.
    """

    # Model classes exercised by the shared ModelTesterMixin machinery.
    snake_case__ : List[Any] = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    # Mapping used by the pipeline test mixin.
    snake_case__ : Tuple = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    snake_case__ : Any = False
    snake_case__ : List[Any] = False
    snake_case__ : Optional[Any] = False
    snake_case__ : Optional[Any] = False

    def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ):
        """Skip predicate for pipeline tests: skip the audio-classification pipeline case.

        NOTE(review): ``pipeline_test_casse_name`` is read but never bound here.
        """
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def SCREAMING_SNAKE_CASE ( self : Any ):
        """setUp-style step: instantiate the model tester and a ConfigTester."""
        _lowerCamelCase : Optional[int] = ASTModelTester(self )
        _lowerCamelCase : Any = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )

    def SCREAMING_SNAKE_CASE ( self : str ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Intentionally skipped: AST has no inputs_embeds."""
        pass

    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Input embeddings must be an nn.Module; output embeddings Linear or None."""
        _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : Dict = model_class(__lowerCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            _lowerCamelCase : List[str] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """forward() must take `input_values` as its first argument."""
        _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : str = model_class(__lowerCAmelCase )
            _lowerCamelCase : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : Any = [*signature.parameters.keys()]
            _lowerCamelCase : str = ['''input_values''']
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Build inputs and run the basic model shape check."""
        _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Smoke-test loading the first pretrained AST checkpoint from the Hub."""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Union[str, Any] = ASTModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def snake_case_():
    """Download the sample FLAC clip from the Hub and load it with torchaudio.

    Returns:
        (audio, sampling_rate): the loaded waveform tensor and its sample rate.

    Fixes: the original bound the downloaded path to ``_lowerCamelCase`` and then
    called ``torchaudio.load(A_)`` on an undefined name; the local is restored
    so the loaded pair is actually produced from the downloaded file.
    """
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase):
    """Slow end-to-end check of the pretrained AudioSet-finetuned AST classifier.

    NOTE(review): an automated rewrite renamed the original locals to
    ``_lowerCamelCase`` and the ``default_feature_extractor`` property to
    ``SCREAMING_SNAKE_CASE``; ``feature_extractor``, ``model``, ``audio``,
    ``inputs``, ``outputs``, ``expected_shape`` and ``expected_slice`` are read
    but never bound. Code kept byte-identical; comments only.
    """

    @cached_property
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Feature extractor for the AudioSet checkpoint, or None without torchaudio."""
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Classify the sample clip and compare the first three logits with reference values."""
        _lowerCamelCase : int = self.default_feature_extractor
        _lowerCamelCase : Union[str, Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(__lowerCAmelCase )
        _lowerCamelCase : List[Any] = self.default_feature_extractor
        _lowerCamelCase , _lowerCamelCase : List[Any] = prepare_audio()
        _lowerCamelCase : Dict = audio.squeeze().numpy()
        _lowerCamelCase : Tuple = feature_extractor(__lowerCAmelCase , sampling_rate=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            _lowerCamelCase : Tuple = model(**__lowerCAmelCase )
        # verify the logits
        _lowerCamelCase : Tuple = torch.Size((1, 5_2_7) )
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
        _lowerCamelCase : Optional[int] = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 83 | 1 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 83 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
):
    """Simulated-annealing search over a SearchProblem-like state space.

    Fixes: the original declared every parameter as ``A_`` (duplicate argument
    name -> SyntaxError) and bound locals to ``_lowerCamelCase`` while the body
    read ``current_state``, ``scores`` etc.; names are restored from the body's
    references and the keyword arguments used at the call sites below. The
    original ``snake_case_`` name is kept as an alias.

    Args:
        search_prob: start state; must expose ``score()``, ``get_neighbors()``
            and ``x``/``y`` coordinates.
        find_max: maximize the score when True, minimize when False.
        max_x/min_x/max_y/min_y: rectangular bounds; out-of-bounds neighbors
            are skipped.
        visualization: when True, plot score-per-iteration with matplotlib.
        start_temperate: initial temperature.
        rate_of_decrease: fractional temperature decay per iteration.
        threshold_temp: temperature below which the search stops.

    Returns:
        The best state encountered.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


# Backward-compatible alias for the original (rewritten) definition name.
snake_case_ = simulated_annealing


if __name__ == "__main__":

    def test_f1(x, y):
        """f(x, y) = x^2 + y^2 (bowl with a minimum at the origin)."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        """f(x, y) = 3x^2 - 6y."""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
| 83 | 1 |
"""simple docstring"""
# Capacity matrix of the demo flow network (test_graph[u][v] = capacity u -> v).
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search over positive-capacity edges of ``graph``.

    Fills ``parent`` with the BFS tree (parent[v] = predecessor of v) and
    returns True when ``t`` is reachable from ``s``.

    Fixes: the original declared all four parameters as ``A_`` (duplicate
    argument name -> SyntaxError) and was named ``snake_case_`` although the
    caller below invokes ``bfs``; names restored from the call site and the
    body's own references.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Run Edmonds-Karp max-flow on ``graph`` (mutated in place as the residual
    network) and return the min-cut edges: pairs (u, v) whose original capacity
    was positive and whose residual capacity is zero afterwards.

    Fixes: parameter and local names restored (the original used duplicate
    ``A_`` parameters and ``_lowerCamelCase`` targets) and the function is named
    ``mincut`` to match its call site below.
    """
    parent = [-1] * (len(graph))
    max_flow = 0  # accumulated flow value (computed but unused, as in the original)
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


# Backward-compatible alias for the original (rewritten) definition name.
snake_case_ = mincut

if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
| 83 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

# Conversion factors relative to one cubic metre:
# ``from_`` converts the unit into cubic metres, ``to`` converts cubic metres back out.
METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def snake_case_(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` between volume units, going through cubic metres.

    Fixes: the original declared all three parameters as ``A_`` (duplicate
    argument name -> SyntaxError) and bound both the ``from_to`` namedtuple and
    the conversion table to ``lowerCAmelCase__`` while the body referenced
    ``METRIC_CONVERSION``; the names are restored from the body's references.

    Raises:
        ValueError: if either unit name is not in METRIC_CONVERSION.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 83 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): this constant's original name is not visible in this file;
# ``SPIECE_UNDERLINE`` follows the sentencepiece convention -- confirm.
SPIECE_UNDERLINE = "▁"

# Fixes: every constant below was bound to ``lowerCAmelCase__`` by an automated
# rewrite, while the tokenizer class defined underneath reads ``logger``,
# ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``,
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` and ``FAIRSEQ_LANGUAGE_CODES``;
# the names are restored from those references.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class __snake_case ( _lowercase):
snake_case__ : Optional[Any] = VOCAB_FILES_NAMES
snake_case__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Optional[Any] = ["input_ids", "attention_mask"]
snake_case__ : List[int] = []
snake_case__ : List[int] = []
def __init__( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Union[str, Any]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Union[str, Any]="<s>" , __lowerCAmelCase : Any="<unk>" , __lowerCAmelCase : List[Any]="<pad>" , __lowerCAmelCase : List[str]="<mask>" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
_lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : List[Any] = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
_lowerCamelCase : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCamelCase : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCamelCase : Tuple = 1
_lowerCamelCase : List[Any] = len(self.sp_model )
_lowerCamelCase : Any = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__lowerCAmelCase )
}
_lowerCamelCase : List[str] = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCamelCase : Tuple = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCamelCase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCamelCase : Tuple = src_lang if src_lang is not None else '''en_XX'''
_lowerCamelCase : Optional[int] = self.lang_code_to_id[self._src_lang]
_lowerCamelCase : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[str] = self.__dict__.copy()
_lowerCamelCase : List[Any] = None
return state
def __setstate__( self : Optional[int] , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str ):
"""simple docstring"""
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase : Dict = self.sp_model.PieceToId(__lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Optional[int] ):
        """Join sub-word pieces back into a string, decoding special tokens
        separately from SentencePiece runs.

        NOTE(review): `tokens`, `prev_is_special`, `out_string` and
        `current_sub_tokens` are the original local names; every assignment
        here lands in the throwaway `_lowerCamelCase` instead, so the loop
        raises NameError as written — compare with the un-mangled original.
        """
        _lowerCamelCase : Dict = []
        _lowerCamelCase : Any = ''''''
        _lowerCamelCase : str = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__lowerCAmelCase ) + token
                _lowerCamelCase : Dict = True
                _lowerCamelCase : str = []
            else:
                current_sub_tokens.append(__lowerCAmelCase )
                _lowerCamelCase : Union[str, Any] = False
        out_string += self.sp_model.decode(__lowerCAmelCase )
        return out_string.strip()
    def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ):
        """Save the SentencePiece vocabulary file into a target directory,
        copying the original file or serializing the in-memory model.

        NOTE(review): the two parameters share the name `__lowerCAmelCase`
        (duplicate parameter names are a SyntaxError), and `save_directory`,
        `filename_prefix` and `out_vocab_file` are the original, now-undefined
        names — confirm against the un-mangled original.
        """
        if not os.path.isdir(__lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _lowerCamelCase : str = os.path.join(
            __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            # Original vocab file is gone: write the serialized model instead.
            with open(__lowerCAmelCase , '''wb''' ) as fi:
                _lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto()
                fi.write(__lowerCAmelCase )
        return (out_vocab_file,)
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ):
        """Build the special-tokens mask (1 = special token, 0 = sequence token)
        matching the prefix/suffix layout of build_inputs below.

        NOTE(review): three parameters share the name `__lowerCAmelCase` and the
        super() call passes `token_ids_a=` twice — duplicate parameter/keyword
        names are SyntaxErrors as written; `already_has_special_tokens`,
        `token_ids_a`, `prefix_ones` and `suffix_ones` are also undefined here.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
        _lowerCamelCase : int = [1] * len(self.prefix_tokens )
        _lowerCamelCase : Optional[int] = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones
        return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones
    def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ):
        """Wrap the sequence(s) with the language prefix tokens and EOS suffix."""
        # NOTE(review): duplicate `__lowerCAmelCase` parameters (SyntaxError) and
        # `token_ids_a` is used for both sequences below.
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] , __lowerCAmelCase : Optional[str] , **__lowerCAmelCase : Tuple ):
        """Tokenize raw text for translation and attach the target-language id.

        NOTE(review): duplicate `__lowerCAmelCase` parameters (SyntaxError as
        written); `src_lang`, `tgt_lang`, `tgt_lang_id` and `inputs` are the
        original, now-undefined names.
        """
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        _lowerCamelCase : int = src_lang
        _lowerCamelCase : Tuple = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
        _lowerCamelCase : int = self.convert_tokens_to_ids(__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = tgt_lang_id
        return inputs
    def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = "en_XX" , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : str = "ro_RO" , **__lowerCAmelCase : List[str] , ):
        """Set src/tgt languages then delegate to the parent seq2seq batch helper."""
        # NOTE(review): `prepare_seqaseq_batch` is the mangled spelling of the
        # original `prepare_seq2seq_batch` — the call will fail unless the base
        # class was mangled identically.
        _lowerCamelCase : Optional[int] = src_lang
        _lowerCamelCase : Union[str, Any] = tgt_lang
        return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Switch the tokenizer into input (source-language) mode."""
        return self.set_src_lang_special_tokens(self.src_lang )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Switch the tokenizer into target-language mode."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : str ):
        """Configure prefix=[source-lang code], suffix=[eos] — presumably
        (MBart-50-style); confirm against the un-mangled original."""
        # NOTE(review): `src_lang` is not the parameter name here, and all three
        # assignments land in the throwaway local `_lowerCamelCase` instead of
        # `self.cur_lang_code_id` / `self.prefix_tokens` / `self.suffix_tokens`.
        _lowerCamelCase : str = self.lang_code_to_id[src_lang]
        _lowerCamelCase : Tuple = [self.cur_lang_code_id]
        _lowerCamelCase : str = [self.eos_token_id]
    def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str ):
        """Same as above, for the target language."""
        # NOTE(review): same breakage as the source-language variant above.
        _lowerCamelCase : Optional[int] = self.lang_code_to_id[tgt_lang]
        _lowerCamelCase : List[Any] = [self.cur_lang_code_id]
        _lowerCamelCase : Any = [self.eos_token_id]
| 83 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( _lowercase):
    """Builds tiny DeBERTa-v2 configs/inputs and shape-checks each model head.

    NOTE(review): the mangling gave every ``__init__`` parameter the same name
    ``__lowerCAmelCase`` (duplicate parameter names are a SyntaxError) and
    rebinds values to the throwaway local ``_lowerCamelCase`` while right-hand
    sides still use the original parameter names (``parent``, ``batch_size``,
    ...), which are therefore undefined here. Docstrings below describe the
    apparent intent; confirm against the un-mangled original.
    """
    def __init__( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : int=1_3 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=9_9 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Union[str, Any]=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Optional[int]=3_7 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : int=5_1_2 , __lowerCAmelCase : Tuple=1_6 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any="None" , __lowerCAmelCase : str=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Optional[Any]=None , ):
        """Record the model/test hyper-parameters on the tester."""
        _lowerCamelCase : Dict = parent
        _lowerCamelCase : Union[str, Any] = batch_size
        _lowerCamelCase : Optional[Any] = seq_length
        _lowerCamelCase : Optional[Any] = is_training
        _lowerCamelCase : Dict = use_input_mask
        _lowerCamelCase : Tuple = use_token_type_ids
        _lowerCamelCase : Optional[Any] = use_labels
        _lowerCamelCase : List[str] = vocab_size
        _lowerCamelCase : Any = hidden_size
        _lowerCamelCase : int = num_hidden_layers
        _lowerCamelCase : Optional[Any] = num_attention_heads
        _lowerCamelCase : int = intermediate_size
        _lowerCamelCase : Optional[int] = hidden_act
        _lowerCamelCase : int = hidden_dropout_prob
        _lowerCamelCase : Dict = attention_probs_dropout_prob
        _lowerCamelCase : List[Any] = max_position_embeddings
        _lowerCamelCase : str = type_vocab_size
        _lowerCamelCase : List[Any] = type_sequence_label_size
        _lowerCamelCase : List[Any] = initializer_range
        _lowerCamelCase : Optional[int] = num_labels
        _lowerCamelCase : Any = num_choices
        _lowerCamelCase : int = relative_attention
        _lowerCamelCase : Union[str, Any] = position_biased_input
        _lowerCamelCase : str = pos_att_type
        _lowerCamelCase : Tuple = scope
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Build random input ids, masks, token types and labels plus a config."""
        _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowerCamelCase : List[Any] = None
        if self.use_input_mask:
            _lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        _lowerCamelCase : Any = None
        if self.use_token_type_ids:
            _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowerCamelCase : Any = None
        _lowerCamelCase : int = None
        _lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            _lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        _lowerCamelCase : int = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Return a small DebertaVaConfig wired from the tester's attributes."""
        return DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str ):
        """Assert the loss tensor is a scalar."""
        # NOTE(review): `result` is the original, now-undefined parameter name.
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ):
        """Run the base model with/without mask and token types; check output shape."""
        _lowerCamelCase : List[str] = DebertaVaModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
        _lowerCamelCase : str = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
        _lowerCamelCase : List[Any] = model(__lowerCAmelCase )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple ):
        """Check the masked-LM head's logits shape."""
        _lowerCamelCase : Tuple = DebertaVaForMaskedLM(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
        """Check the sequence-classification head's logits shape and scalar loss."""
        _lowerCamelCase : Optional[Any] = self.num_labels
        _lowerCamelCase : Dict = DebertaVaForSequenceClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ):
        """Check the token-classification head's logits shape."""
        _lowerCamelCase : Optional[int] = self.num_labels
        _lowerCamelCase : Tuple = DebertaVaForTokenClassification(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
        """Check the QA head's start/end logits shapes."""
        _lowerCamelCase : List[str] = DebertaVaForQuestionAnswering(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Tuple = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
        """Check the multiple-choice head's logits shape (inputs tiled per choice)."""
        _lowerCamelCase : Optional[int] = DebertaVaForMultipleChoice(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : List[Any] = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Repack prepared inputs into the dict form the common tests expect."""
        _lowerCamelCase : Any = self.prepare_config_and_inputs()
        # NOTE(review): this 7-way unpack binds every element to the same name
        # `_lowerCamelCase`; the original bound config, input_ids, token_type_ids,
        # input_mask and the three label tensors to distinct names.
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : Union[str, Any] = config_and_inputs
        _lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    """Common-test harness for the DeBERTa-v2 model family.

    NOTE(review): every class attribute below shares the name ``snake_case__``
    (only the last assignment survives), and ``DebertaVaModelTester`` in
    ``setUp`` is undefined here — the tester class above was renamed
    ``__snake_case`` — so these tests fail with NameError as written.
    """
    # Model classes exercised by the shared tests (when torch is available).
    snake_case__ : int = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    snake_case__ : int = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Boolean fixture flags (original names lost in mangling).
    snake_case__ : List[str] = True
    snake_case__ : List[Any] = False
    snake_case__ : int = False
    snake_case__ : Optional[Any] = False
    snake_case__ : str = False
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Create the model tester and a ConfigTester."""
        _lowerCamelCase : List[str] = DebertaVaModelTester(self )
        _lowerCamelCase : Any = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Shape-check the base model."""
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Shape-check the sequence-classification head."""
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Shape-check the masked-LM head."""
        _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Shape-check the question-answering head."""
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Shape-check the token-classification head."""
        _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Shape-check the multiple-choice head."""
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*__lowerCAmelCase )
    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Smoke-test loading the first published checkpoint."""
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Tuple = DebertaVaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase):
    """Integration tests against the real microsoft/deberta-v2-xlarge checkpoint."""
    @unittest.skip(reason='''Model not available yet''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Placeholder for a test whose checkpoint is not published yet."""
        pass
    @slow
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Compare a 3x3 slice of the base model's output against hard-coded values.

        NOTE(review): every intermediate is rebound to `_lowerCamelCase` and read
        back through other names (`model`, `output`, `__lowerCAmelCase`) —
        NameErrors as written; confirm against the un-mangled original.
        """
        _lowerCamelCase : Tuple = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
        _lowerCamelCase : List[str] = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        _lowerCamelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            _lowerCamelCase : Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
        # compare the actual values for a slice.
        _lowerCamelCase : Union[str, Any] = torch.tensor(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 83 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def snake_case_ ( A_ : int ):
    """Return True if ``A_`` is prime.

    Trial division: handle 2/3 and small cases directly, then test only the
    6k +/- 1 candidates up to sqrt(A_) (every prime > 3 has that form).

    Args:
        A_: integer to test; negatives, 0 and 1 are not prime.

    Returns:
        bool: True when ``A_`` is prime.
    """
    # Fix: the original body referenced ``number``, which is not the parameter
    # (the parameter is ``A_``), so every call raised NameError.
    if 1 < A_ < 4:
        # 2 and 3 are primes
        return True
    elif A_ < 2 or A_ % 2 == 0 or A_ % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ):
        if A_ % i == 0 or A_ % (i + 2) == 0:
            return False
    return True
# Odd composite numbers below 100001 — candidates for Goldbach's other conjecture.
# Fix: the predicate was referenced by its pre-mangling name ``is_prime``, which
# is not defined in this module; the primality test defined above is bound to
# ``snake_case_`` at this point.
lowerCAmelCase__ = [num for num in range(3, 100001, 2) if not snake_case_(num)]
def snake_case_ ( A_ : int ):
    """Return the first ``A_`` odd composite numbers (below 100001) that cannot
    be written as a prime plus twice a square (Project Euler 46,
    "Goldbach's other conjecture").

    Args:
        A_: how many counterexamples to collect; must be a positive int.

    Returns:
        list[int]: the counterexamples found, or [] if fewer than ``A_`` exist
        below the search bound.

    Raises:
        ValueError: if ``A_`` is not an int or is <= 0.
    """
    # Fix: the original checked ``isinstance(A_, A_)`` (always comparing the
    # value against itself) and referenced the undefined names ``is_prime``,
    # ``odd_composites``, ``list_nums``, ``i`` and ``n`` left over from
    # mangling. This rewrite is self-contained and keeps the same contract.
    if not isinstance(A_, int ):
        raise ValueError('''n must be an integer''' )
    if A_ <= 0:
        raise ValueError('''n must be >= 0''' )

    def _is_prime(number: int) -> bool:
        # Local 6k +/- 1 trial division, so this function does not depend on
        # module-level names the mangling renamed away.
        if 1 < number < 4:
            return True
        if number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        for divisor in range(5, int(math.sqrt(number ) + 1 ), 6 ):
            if number % divisor == 0 or number % (divisor + 2) == 0:
                return False
        return True

    list_nums = []
    for candidate in range(3, 100001, 2 ):  # odd numbers only
        if _is_prime(candidate ):
            continue  # the conjecture only concerns odd composites
        i = 0
        while 2 * i * i <= candidate:
            if _is_prime(candidate - 2 * i * i ):
                break  # candidate = prime + 2*i*i: conjecture holds for it
            i += 1
        else:
            # No prime + 2*square decomposition exists: a counterexample.
            list_nums.append(candidate )
            if len(list_nums ) == A_:
                return list_nums
    return []
def snake_case_ ( ):
    """Return the smallest odd composite violating Goldbach's other conjecture.

    NOTE(review): ``compute_nums`` is not defined in this module — the solver
    above was also renamed to ``snake_case_`` (which this very definition now
    shadows) — so calling this function raises NameError as written.
    """
    return compute_nums(1 )[0]
if __name__ == "__main__":
    # NOTE(review): ``solution`` is likewise undefined here (the zero-arg entry
    # point above is named ``snake_case_``), so this guard raises NameError.
    print(F"""{solution() = }""")
| 83 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# Repo id of the tiny model the caching tests below target.
# NOTE(review): all three constants share one name, so the later assignments
# shadow the earlier ones — the originals had distinct names.
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert'''
# Expected on-disk cache directory for that repo under TRANSFORMERS_CACHE.
lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
# Pinned full commit hash (doubles as the snapshot folder name).
lowerCAmelCase__ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class __snake_case ( unittest.TestCase):
    """Tests for the hub file helpers ``cached_file`` / ``has_file`` /
    ``get_file_from_repo``.

    NOTE(review): the methods below use ``__lowerCAmelCase`` as a free name even
    though they only take ``self``, and rebind results to the throwaway local
    ``_lowerCamelCase`` — NameErrors as written; recover the intended constants
    (repo id, cache dir, filenames) from the un-mangled original.
    """
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Download a file, verify the cache layout, and check cache hits and
        revision pinning."""
        _lowerCamelCase : Optional[Any] = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(__lowerCAmelCase ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : Optional[int] = f.read()
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
        self.assertTrue(os.path.isfile(__lowerCAmelCase ) )
        # File is cached at the same place the second time.
        _lowerCamelCase : Tuple = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        # Using a specific revision to test the full commit hash.
        _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''9b8c223''' )
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Invalid repo id / revision / filename should raise with clear messages."""
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            _lowerCamelCase : Optional[int] = cached_file('''tiny-random-bert''' , __lowerCAmelCase )
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            _lowerCamelCase : str = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''aaaa''' )
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : int = cached_file(__lowerCAmelCase , '''conf''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Missing files are negatively cached (.no_exist), and missing-entry /
        connection errors can be soft-failed into ``None``."""
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , '''conf''' )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : List[Any] = f.read()
        self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''.no_exist''' , __lowerCAmelCase , '''conf''' ) ) )
        _lowerCamelCase : str = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = cached_file(__lowerCAmelCase , '''conf''' , local_files_only=__lowerCAmelCase , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        _lowerCamelCase : Any = mock.Mock()
        _lowerCamelCase : Optional[Any] = 5_0_0
        _lowerCamelCase : Dict = {}
        _lowerCamelCase : List[Any] = HTTPError
        _lowerCamelCase : int = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=__lowerCAmelCase ) as mock_head:
            _lowerCamelCase : Union[str, Any] = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=__lowerCAmelCase )
            self.assertIsNone(__lowerCAmelCase )
            # This check we did call the fake head request
            mock_head.assert_called()
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """has_file: one weight flavor exists for this repo, two do not —
        presumably WEIGHTS_NAME vs TF2/Flax names (imported above); confirm."""
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """get_file_from_repo on a remote repo: missing file -> None, bad repo or
        revision raises, and a fetched config parses as JSON."""
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , __lowerCAmelCase )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase , revision='''ahaha''' )
        _lowerCamelCase : Dict = get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        _lowerCamelCase : Dict = json.loads(open(__lowerCAmelCase , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 7_6_8 )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """get_file_from_repo with a local directory path: existing file is
        returned as a path string, missing file yields None."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCamelCase : Any = Path(__lowerCAmelCase ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(__lowerCAmelCase , '''a.txt''' ) , str(__lowerCAmelCase ) )
            self.assertIsNone(get_file_from_repo(__lowerCAmelCase , '''b.txt''' ) )
| 83 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __snake_case ( unittest.TestCase):
    """Builds tiny DistilBERT configs/inputs for the Flax common tests.

    NOTE(review): every ``__init__`` parameter shares the name
    ``__lowerCAmelCase`` (duplicate parameter names are a SyntaxError) and the
    constructor rebinds values to the throwaway local ``_lowerCamelCase`` while
    the right-hand sides use the original, now-undefined parameter names.
    """
    def __init__( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any]=1_3 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=True , __lowerCAmelCase : int=9_9 , __lowerCAmelCase : str=3_2 , __lowerCAmelCase : str=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=3_7 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=5_1_2 , __lowerCAmelCase : Optional[Any]=1_6 , __lowerCAmelCase : int=2 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : int=4 , ):
        """Record the test hyper-parameters on the tester."""
        _lowerCamelCase : List[str] = parent
        _lowerCamelCase : Union[str, Any] = batch_size
        _lowerCamelCase : Dict = seq_length
        _lowerCamelCase : Optional[Any] = is_training
        _lowerCamelCase : Union[str, Any] = use_attention_mask
        _lowerCamelCase : Tuple = use_token_type_ids
        _lowerCamelCase : Dict = use_labels
        _lowerCamelCase : List[Any] = vocab_size
        _lowerCamelCase : Union[str, Any] = hidden_size
        _lowerCamelCase : Optional[int] = num_hidden_layers
        _lowerCamelCase : int = num_attention_heads
        _lowerCamelCase : Union[str, Any] = intermediate_size
        _lowerCamelCase : List[str] = hidden_act
        _lowerCamelCase : List[str] = hidden_dropout_prob
        _lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
        _lowerCamelCase : Any = max_position_embeddings
        _lowerCamelCase : List[Any] = type_vocab_size
        _lowerCamelCase : str = type_sequence_label_size
        _lowerCamelCase : Optional[int] = initializer_range
        _lowerCamelCase : Optional[int] = num_choices
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Build random input ids (+ optional attention mask) and a DistilBertConfig."""
        _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowerCamelCase : List[Any] = None
        if self.use_attention_mask:
            _lowerCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        _lowerCamelCase : List[str] = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCAmelCase , )
        return config, input_ids, attention_mask
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Repack prepared inputs into the dict form the common tests expect."""
        _lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = config_and_inputs
        _lowerCamelCase : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class __snake_case ( _lowercase , unittest.TestCase):
    """Runs the shared Flax model tests over the DistilBERT classes.

    NOTE(review): ``FlaxDistilBertForQuestionAnswering`` appears twice in the
    class tuple below (duplicate entry), and ``FlaxDistilBertModelTester`` in
    ``setUp`` is undefined here — the tester class above was renamed
    ``__snake_case``.
    """
    snake_case__ : Optional[Any] = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Create the model tester."""
        _lowerCamelCase : List[str] = FlaxDistilBertModelTester(self )
    @slow
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Smoke-test loading each pretrained class and running a 1x1 input."""
        for model_class_name in self.all_model_classes:
            _lowerCamelCase : Optional[Any] = model_class_name.from_pretrained('''distilbert-base-uncased''' )
            _lowerCamelCase : Dict = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__lowerCAmelCase )
@require_flax
class __snake_case ( unittest.TestCase):
    """Integration test against the real distilbert-base-uncased checkpoint."""
    @slow
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Check the output shape and a hard-coded 3x3 slice of the hidden state.

        NOTE(review): intermediates are rebound to `_lowerCamelCase` and read
        back via other names (`model`, `output`, `__lowerCAmelCase`) —
        NameErrors as written.
        """
        _lowerCamelCase : Union[str, Any] = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        _lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        _lowerCamelCase : int = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        _lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
        _lowerCamelCase : Optional[int] = (1, 1_1, 7_6_8)
        self.assertEqual(output.shape , __lowerCAmelCase )
        _lowerCamelCase : List[Any] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 ) )
| 83 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): the logger and the checkpoint map below share one name, so the
# dict assignment shadows the logger — the originals had distinct names.
lowerCAmelCase__ = logging.get_logger(__name__)
# Canonical checkpoint name -> config URL for the CvT family.
lowerCAmelCase__ = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __snake_case ( _lowercase):
    """Configuration for the CvT (Convolutional vision Transformer) model.

    Holds the per-stage architecture hyper-parameters (three stages by
    default). Parameter names match the attribute names stored on the config.

    Fix: the mangled original gave every ``__init__`` parameter the same name
    (duplicate parameter names are a SyntaxError) and bound each value to the
    throwaway local ``_lowerCamelCase`` instead of ``self``, so the config
    stored nothing. Distinct, attribute-matching parameter names are restored
    and the values are stored on the instance.
    """

    # Model-type identifier read by the config framework (name of this class
    # attribute was mangled; the value is what matters downstream).
    snake_case__ : List[str] = "cvt"

    def __init__(
        self,
        num_channels=3,
        # List defaults are kept (one entry per stage) to preserve the
        # documented signature; callers should pass fresh lists if they mutate.
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[6_4, 1_9_2, 3_8_4],
        num_heads=[1, 3, 6],
        depth=[1, 2, 1_0],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 83 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def snake_case_(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under ``fnc`` on [x_start, x_end] by the trapezoidal rule.

    The original code declared all four parameters under the same name `A_`
    (a SyntaxError) and collapsed the two sample points into one variable, so
    each trapezoid contributed ``(xa - xa) == 0`` and the result was always 0.

    Args:
        fnc: function whose curve is integrated.
        x_start: left endpoint of the interval.
        x_end: right endpoint of the interval.
        steps: number of trapezoids; more steps gives better accuracy.

    Returns:
        The (unsigned) trapezoidal estimate of the area.
    """
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area
if __name__ == "__main__":

    def f(x):
        """Integrand for the demo run."""
        return x**3 + x**2

    print('''f(x) = x^3 + x^2''')
    print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
    i = 10
    while i <= 100000:
        # `snake_case_` is the trapezoidal integrator defined above. The
        # original printed expression called the nonexistent names
        # `trapezoidal_area` and `f` (the inner helper shadowed the
        # integrator and the loop counter `i` was never assigned).
        print(f"""with {i} steps: {snake_case_(f, -5, 5, i)}""")
        i *= 10
| 83 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output of the scheduler's predictor step.

    Names restored from the construction site in `step_pred`
    (`SdeVeOutput(prev_sample=..., prev_sample_mean=...)`); the original
    declared both fields under a single obfuscated name (the second
    annotation overwrote the first) and inherited the undefined `_lowercase`
    instead of the imported `BaseOutput`.
    """

    # Sample at the previous timestep (denoised one step).
    prev_sample: torch.FloatTensor
    # Mean of `prev_sample` before the stochastic diffusion noise is added.
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler (Song et al., score-based SDEs).

    Provides the predictor (`step_pred`) and corrector (`step_correct`)
    updates of predictor-corrector sampling. Class, method and parameter
    names are restored from the internal call sites and the public
    `diffusers` API: the original declared every method under one shared name
    (each definition silently overwrote the previous, so the internal calls
    to `set_sigmas`/`set_timesteps`/`get_adjacent_sigma` hit missing
    attributes) and gave every parameter the same name, a SyntaxError.
    """

    # Solver order; pipelines read this attribute when scheduling model calls.
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2_0_0_0,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 13_48.0,
        sampling_eps: float = 1E-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values (filled by set_timesteps/set_sigmas)
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity: the VE-SDE formulation does not rescale model inputs."""
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        """Create the continuous timesteps, linearly spaced from 1 down to `sampling_eps`."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        """Set the geometric noise scales for sampling.

        Populates `self.discrete_sigmas` (log-spaced, used by `step_pred` and
        `add_noise`) and `self.sigmas` (per-timestep geometric interpolation).
        """
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        # NOTE(review): the first `self.sigmas` assignment is immediately
        # overwritten below; kept to mirror the original statement order.
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        # Sigma of the previous discrete step; zero at t == 0 (no predecessor).
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device)
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Predictor step: one reverse-SDE update of `sample`.

        Returns an `SdeVeOutput` (or `(prev_sample, prev_sample_mean)` tuple
        when `return_dict` is False).
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Corrector step: Langevin-dynamics refinement of `sample` at a fixed noise level."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ):
        """Forward-diffuse `original_samples` to the noise level of `timesteps`."""
        # Make sure sigmas and timesteps have the same device as original_samples.
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
| 83 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table for the MaskFormer package. In the original every
# assignment below clobbered a single obfuscated name (`lowerCAmelCase__`),
# `_import_structure` was referenced at the bottom but never defined (NameError
# on import), and the `_LazyModule` proxy was never installed in sys.modules.
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision extras available -> expose the image processors lazily.
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch available -> expose the modeling classes lazily.
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports the
    # submodules above only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 83 |
"""simple docstring"""
from torch import nn
def snake_case_(act_fn: str) -> nn.Module:
    """Map an activation-function name to a freshly built torch.nn module.

    The original declared the parameter as `A_` (annotated int, though the
    value is a string) while the body read the undefined name `act_fn`, so
    every call raised NameError; the parameter is renamed to match the body.

    Args:
        act_fn: one of "swish", "silu" (both SiLU), "mish" or "gelu".

    Returns:
        A new activation module instance.

    Raises:
        ValueError: if `act_fn` is not a supported name.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''')
| 83 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def snake_case_(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the kwargs dict fed to the Flax Blenderbot models under test.

    Parameter names restored from the body (the original declared every
    parameter as `A_`, a SyntaxError). Missing attention masks are derived
    from the pad token; head masks default to all-ones.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the encoder mask is reused here and the computed
        # `decoder_attention_mask` (and the head masks) are discarded — this
        # mirrors the original return statement; confirm upstream before
        # changing it.
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    """Builds tiny Blenderbot configs/inputs and shared KV-cache checks.

    Class and method names restored from the call sites in the model test
    class (`FlaxBlenderbotModelTester(self)`, `prepare_config_and_inputs`,
    `check_use_cache_forward*`); the original gave all four methods one
    shared name and declared `__init__`'s parameters under a single name
    (a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=9_9,
        hidden_size=1_6,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=3_2,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Random token ids (EOS-terminated) plus a matching tiny config."""
        # Ids in [3, vocab_size); the last column is the EOS token (id 2).
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            # NOTE(review): the boolean literal was lost to obfuscation;
            # False mirrors the sibling Flax BART-family testers — confirm.
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Incremental decoding with init_cache must match a full decode."""
        max_decoder_length = 2_0
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f'''Max diff is {diff}''')

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same cache consistency check, but with an explicit (padded) decoder mask."""
        max_decoder_length = 2_0
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the mask with zeros up to the cache length.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f'''Max diff is {diff}''')
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Hand-rolled forward-pass checks for the Flax LM-head model.

    Names restored: the class attribute is read as `self.vocab_size`, the
    test methods must be named `test_*` to be collected (in the original all
    methods shared one name and overwrote each other).
    """

    vocab_size = 9_9

    def _get_config_and_data(self):
        """A fixed batch of EOS-terminated token rows plus a matching tiny config."""
        input_ids = np.array(
            [
                [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
                [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
                [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
                [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
                [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
                [5_5, 1_3, 1_6, 5_8, 5, 2, 1],  # note padding
                [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
                [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
                [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
                [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
                [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
                [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
                [7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=2_4,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=3_2,
            decoder_ffn_dim=3_2,
            max_position_embeddings=4_8,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        """Encoder-only input must yield logits of shape (batch, seq, vocab)."""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        """Logit shape must follow the decoder length when it differs from the encoder's."""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=1_4,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=4_8,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]], dtype=np.int64)
        summary = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        """shift_tokens_right must keep shape, drop one pad and start rows with the decoder-start id."""
        input_ids = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """Flax Blenderbot model tests (bases restored from the module imports;
    in the original all methods shared one obfuscated name and the class
    referenced the undefined `FlaxBlenderbotModelTester`)."""

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        """encode() must produce identically-shaped outputs with and without jax.jit."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        """decode() must produce identically-shaped outputs with and without jax.jit."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['''input_ids'''], inputs_dict['''attention_mask'''])

                prepared_inputs_dict = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != '''cpu''', '''3B test too slow on CPU.''')
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
        TOK_DECODE_KW = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}

        # from_pt=True: only a PyTorch checkpoint exists for the 3B model.
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''', from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''')

        src_text = ['''Sam''']
        model_inputs = tokenizer(src_text, return_tensors='''jax''')

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = '''Sam is a great name. It means "sun" in Gaelic.'''

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 83 |
"""simple docstring"""
def snake_case_(input_a: int, input_b: int) -> int:
    """NOR gate: return 1 iff both inputs are 0, else 0.

    The original declared both parameters as `A_` (a SyntaxError) and the
    body compared one input with itself (`input_a == input_a == 0`), so the
    second operand was ignored; the names are restored so both participate.
    """
    return int(input_a == input_b == 0)
def main():
    """Print the NOR-gate truth table.

    Renamed from a second `def snake_case_` that shadowed the gate function
    itself; the `__main__` guard below calls `main()`, which did not exist,
    and the f-strings called the undefined name `nor_gate` (now the gate
    `snake_case_`). Printed output is unchanged.
    """
    print('''Truth Table of NOR Gate:''')
    print('''| Input 1 | Input 2 | Output |''')
    print(f'''| 0 | 0 | {snake_case_(0, 0)} |''')
    print(f'''| 0 | 1 | {snake_case_(0, 1)} |''')
    print(f'''| 1 | 0 | {snake_case_(1, 0)} |''')
    print(f'''| 1 | 1 | {snake_case_(1, 1)} |''')
# Script entry point: run any doctests defined in this module, then print the
# NOR truth table.
# NOTE(review): `main` is not defined under this name above — the truth-table
# printer (currently a duplicate `snake_case_`) appears to be the intended
# target; confirm and rename it.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 83 | 1 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Optional[Any] = RobertaTokenizer
snake_case__ : int = RobertaTokenizerFast
snake_case__ : List[Any] = True
snake_case__ : Optional[Any] = {"cls_token": "<s>"}
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_lowerCamelCase : Dict = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_lowerCamelCase : Tuple = {'''unk_token''': '''<unk>'''}
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = '''lower newer'''
_lowerCamelCase : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCamelCase : Optional[Any] = '''lower newer'''
_lowerCamelCase : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_lowerCamelCase : List[str] = tokenizer.tokenize(__lowerCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
    # NOTE(review): this helper is never collected by unittest (its name does
    # not start with `test_`) and cannot run as written: the tokenizer is
    # bound to the obfuscated local `_lowerCamelCase` while the calls read the
    # undefined name `tokenizer`, and `add_special_tokens=__lowerCAmelCase`
    # references an undefined name (presumably a True/False literal lost to
    # obfuscation). The expected id lists also look like roberta-base ids,
    # not the tiny fixture vocab — confirm against the upstream test before
    # repairing.
    def SCREAMING_SNAKE_CASE ( self : int ):
        """simple docstring"""
        _lowerCamelCase : List[str] = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
        self.assertListEqual(
            tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : int = self.tokenizer_class.from_pretrained('''roberta-base''' )
_lowerCamelCase : int = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
    """Exercise prefix-space handling and the left-stripping `<mask>` token.

    NOTE(review): assignments bind `_lowerCamelCase`, so later reads of
    `tokenizer`, `encoded`, `mask_loc` and the `__lowerCAmelCase` argument
    values/encode targets are undefined — the test cannot run as written.
    """
    _lowerCamelCase : Tuple = self.get_tokenizer()
    _lowerCamelCase : List[str] = '''Encode this sequence.'''
    # byte-level BPE representation of a single ASCII space (byte 0x20)
    _lowerCamelCase : str = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
    # Testing encoder arguments
    _lowerCamelCase : Any = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
    _lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
    self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
    _lowerCamelCase : Tuple = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
    _lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
    self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
    tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
    _lowerCamelCase : Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
    # index 1 skips the freshly added <s> bos token
    _lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
    self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
    # Testing spaces after special tokens
    _lowerCamelCase : Tuple = '''<mask>'''
    tokenizer.add_special_tokens(
        {'''mask_token''': AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )} ) # mask token has a left space
    _lowerCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
    _lowerCamelCase : Union[str, Any] = '''Encode <mask> sequence'''
    _lowerCamelCase : List[str] = '''Encode <mask>sequence'''
    _lowerCamelCase : str = tokenizer.encode(__lowerCAmelCase )
    _lowerCamelCase : Tuple = encoded.index(__lowerCAmelCase )
    # token right after <mask>: with a space in the source it should keep the space prefix
    _lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
    self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
    _lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase )
    _lowerCamelCase : Optional[Any] = encoded.index(__lowerCAmelCase )
    _lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
    self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
    """Intentional no-op — presumably overrides a common-test case that does
    not apply to this tokenizer; TODO confirm against the mixin base class."""
    pass
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
    """Compare the slow (python) and fast (rust) tokenizers on a sentence
    containing `<mask>`: token_type_ids, attention_mask and the exact id
    sequence must match a pinned expectation.

    NOTE(review): the loop variables `tokenizer`/`pretrained_name`/`kwargs`
    are real, but the per-iteration results are bound to `_lowerCamelCase`,
    so `tokens_r`/`tokens_p` and the `__lowerCAmelCase` argument values read
    below are undefined — the test cannot run as written.
    """
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
            _lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
            _lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
            _lowerCamelCase : Optional[Any] = '''A, <mask> AllenNLP sentence.'''
            _lowerCamelCase : Optional[Any] = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
            _lowerCamelCase : Optional[int] = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
            # token_type_ids should put 0 everywhere
            self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
            # attention_mask should put 1 everywhere, so sum over length should be 1
            self.assertEqual(
                sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
            _lowerCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
            _lowerCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
            # Rust correctly handles the space before the mask while python doesnt
            self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
            self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
            self.assertSequenceEqual(
                __lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
            self.assertSequenceEqual(
                __lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
    """For every (trim_offsets, add_prefix_space) combination, check the fast
    tokenizer serializes those flags into its pre_tokenizer / post_processor
    state.

    NOTE(review): results are bound to `_lowerCamelCase`, so `tokenizer_r`,
    `pre_tokenizer_state`, `post_processor_state` and the `__lowerCAmelCase`
    values are undefined as written; also `itertools` must be imported at
    module level for this to run — confirm.
    """
    for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
        _lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
            self.tmpdirname , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
        _lowerCamelCase : Any = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
        _lowerCamelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
        self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __lowerCAmelCase )
        self.assertEqual(post_processor_state['''add_prefix_space'''] , __lowerCAmelCase )
        self.assertEqual(post_processor_state['''trim_offsets'''] , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
    """Check character offset mappings for the fast tokenizer across all
    combinations of `add_prefix_space` and `trim_offsets`, on the input
    "hello hello" and on the same input with a leading space.

    NOTE(review): every assignment binds `_lowerCamelCase`, so later reads
    of `text_of_1_token`, `text`, `tokenizer_r`, `encoding` and the
    `__lowerCAmelCase` argument values/length operands are undefined — as
    written the test raises NameError. The expected-offset tuples still
    document the intent: without `trim_offsets` the second token's span
    absorbs the preceding space.
    """
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
            _lowerCamelCase : int = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
            _lowerCamelCase : Any = f'''{text_of_1_token} {text_of_1_token}'''
            # add_prefix_space / trim_offsets combination 1
            _lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(
                __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
            _lowerCamelCase : Optional[Any] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
            self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
            self.assertEqual(
                encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
            # combination 2
            _lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
                __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
            _lowerCamelCase : Tuple = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
            self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
            self.assertEqual(
                encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
            # combination 3: second token's span starts at the space (no trimming)
            _lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
            _lowerCamelCase : int = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
            self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
            self.assertEqual(
                encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
            # combination 4
            _lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
                __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
            _lowerCamelCase : Dict = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
            self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
            self.assertEqual(
                encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
            # Same checks with a leading space in the input text.
            _lowerCamelCase : int = f''' {text}'''
            # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
            #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
            # )
            # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
            # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
            # self.assertEqual(
            #     encoding.offset_mapping[1],
            #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
            # )
            _lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
            _lowerCamelCase : Optional[int] = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
            self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCAmelCase )) )
            self.assertEqual(
                encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ) + 1, 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
            _lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
                __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
            _lowerCamelCase : str = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
            self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
            self.assertEqual(
                encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
            _lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
            _lowerCamelCase : Tuple = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
            self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
            self.assertEqual(
                encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
| 83 |
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : list[list[int]] ):
    """Return the minimal path sum from the top-left to the bottom-right
    cell of *A_*, moving only right or down.

    The matrix is updated in place: after the call, each cell holds the
    minimal cost of reaching it, and the bottom-right cell is the answer.
    An empty matrix (or one with empty rows) yields 0.

    BUGFIX: the original body read an undefined name ``matrix`` while the
    parameter was named ``A_`` (NameError on every call); the alias below
    restores a working binding without changing the public signature.
    """
    matrix = A_
    if not matrix or not matrix[0]:
        return 0
    # first row: only reachable by moving right, so take prefix sums
    for i in range(1, len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # first column: only reachable by moving down
    for i in range(1, len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # every other cell: cheapest of arriving from above or from the left
    for i in range(1, len(matrix ) ):
        for j in range(1, len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
    # Script entry point: run any doctest examples found in this module's
    # docstrings (currently there are none, so this is a no-op smoke check).
    import doctest
    doctest.testmod()
| 83 | 1 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __snake_case ( nn.Module):
    """Flax cross-attention down-block: `num_layers` (ResNet, Transformer2D)
    pairs, optionally followed by a downsampler, returning every intermediate
    hidden state for the UNet skip connections.

    NOTE(review): the obfuscation broke this class — every field is declared
    under the same name `snake_case__` (so `self.in_channels`,
    `self.num_layers`, etc. are undeclared), `setup` binds its results to the
    throwaway local `_lowerCamelCase` while appending the undefined
    `__lowerCAmelCase`, and `self.resnets` / `self.attentions` /
    `self.downsamplers_a` read in `__call__` are never assigned. The module
    cannot run as written.
    """
    # architecture hyper-parameters (flax.linen dataclass-style fields)
    snake_case__ : int
    snake_case__ : int
    snake_case__ : float = 0.0
    snake_case__ : int = 1
    snake_case__ : int = 1
    snake_case__ : bool = True
    snake_case__ : bool = False
    snake_case__ : bool = False
    snake_case__ : bool = False
    snake_case__ : jnp.dtype = jnp.floataa
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Build the per-layer ResNet and attention sub-modules (flax setup)."""
        _lowerCamelCase : List[str] = []
        _lowerCamelCase : Optional[Any] = []
        for i in range(self.num_layers ):
            # first layer maps in_channels -> out_channels, later layers keep out_channels
            _lowerCamelCase : Optional[int] = self.in_channels if i == 0 else self.out_channels
            _lowerCamelCase : Union[str, Any] = FlaxResnetBlockaD(
                in_channels=__lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(__lowerCAmelCase )
            _lowerCamelCase : List[str] = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = resnets
        _lowerCamelCase : List[Any] = attentions
        if self.add_downsample:
            _lowerCamelCase : str = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any]=True ):
        """Apply each (resnet, attention) pair, accumulating hidden states."""
        _lowerCamelCase : List[Any] = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            _lowerCamelCase : Optional[int] = resnet(__lowerCAmelCase , __lowerCAmelCase , deterministic=__lowerCAmelCase )
            _lowerCamelCase : str = attn(__lowerCAmelCase , __lowerCAmelCase , deterministic=__lowerCAmelCase )
            output_states += (hidden_states,)
        if self.add_downsample:
            _lowerCamelCase : Optional[Any] = self.downsamplers_a(__lowerCAmelCase )
            output_states += (hidden_states,)
        return hidden_states, output_states
class __snake_case ( nn.Module):
    """Flax down-block without attention: `num_layers` ResNet blocks followed
    by an optional downsampler; returns every intermediate hidden state.

    NOTE(review): same obfuscation breakage as the sibling blocks — duplicate
    `snake_case__` field names, `setup` results bound to `_lowerCamelCase`,
    and `self.resnets` / `self.downsamplers_a` never assigned.
    """
    # architecture hyper-parameters (flax.linen dataclass-style fields)
    snake_case__ : int
    snake_case__ : int
    snake_case__ : float = 0.0
    snake_case__ : int = 1
    snake_case__ : bool = True
    snake_case__ : jnp.dtype = jnp.floataa
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Build the per-layer ResNet sub-modules (flax setup)."""
        _lowerCamelCase : Optional[int] = []
        for i in range(self.num_layers ):
            # first layer maps in_channels -> out_channels, later layers keep out_channels
            _lowerCamelCase : Any = self.in_channels if i == 0 else self.out_channels
            _lowerCamelCase : Union[str, Any] = FlaxResnetBlockaD(
                in_channels=__lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(__lowerCAmelCase )
        _lowerCamelCase : Dict = resnets
        if self.add_downsample:
            _lowerCamelCase : Tuple = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple=True ):
        """Apply each ResNet block, accumulating hidden states for skips."""
        _lowerCamelCase : List[str] = ()
        for resnet in self.resnets:
            _lowerCamelCase : int = resnet(__lowerCAmelCase , __lowerCAmelCase , deterministic=__lowerCAmelCase )
            output_states += (hidden_states,)
        if self.add_downsample:
            _lowerCamelCase : Union[str, Any] = self.downsamplers_a(__lowerCAmelCase )
            output_states += (hidden_states,)
        return hidden_states, output_states
class __snake_case ( nn.Module):
    """Flax cross-attention up-block: consumes the down-path skip states,
    concatenating one per layer before each (ResNet, Transformer2D) pair,
    with an optional final upsampler.

    NOTE(review): same obfuscation breakage as the sibling blocks — duplicate
    `snake_case__` field names, `setup` results bound to `_lowerCamelCase`,
    and `self.resnets` / `self.attentions` / `self.upsamplers_a` never
    assigned; `__call__` also reads undefined `res_hidden_states_tuple` /
    `hidden_states` locals.
    """
    # architecture hyper-parameters (flax.linen dataclass-style fields)
    snake_case__ : int
    snake_case__ : int
    snake_case__ : int
    snake_case__ : float = 0.0
    snake_case__ : int = 1
    snake_case__ : int = 1
    snake_case__ : bool = True
    snake_case__ : bool = False
    snake_case__ : bool = False
    snake_case__ : bool = False
    snake_case__ : jnp.dtype = jnp.floataa
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Build the per-layer ResNet and attention sub-modules (flax setup)."""
        _lowerCamelCase : Optional[int] = []
        _lowerCamelCase : List[str] = []
        for i in range(self.num_layers ):
            # skip-connection channel count depends on which down-block state is concatenated
            _lowerCamelCase : List[str] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            _lowerCamelCase : str = self.prev_output_channel if i == 0 else self.out_channels
            _lowerCamelCase : Any = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(__lowerCAmelCase )
            _lowerCamelCase : Dict = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = resnets
        _lowerCamelCase : Optional[Any] = attentions
        if self.add_upsample:
            _lowerCamelCase : str = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Any=True ):
        """Concatenate a skip state per layer, then apply resnet + attention."""
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            _lowerCamelCase : List[Any] = res_hidden_states_tuple[-1]
            _lowerCamelCase : Any = res_hidden_states_tuple[:-1]
            _lowerCamelCase : Union[str, Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            _lowerCamelCase : List[str] = resnet(__lowerCAmelCase , __lowerCAmelCase , deterministic=__lowerCAmelCase )
            _lowerCamelCase : Tuple = attn(__lowerCAmelCase , __lowerCAmelCase , deterministic=__lowerCAmelCase )
        if self.add_upsample:
            _lowerCamelCase : List[Any] = self.upsamplers_a(__lowerCAmelCase )
        return hidden_states
class __snake_case ( nn.Module):
    """Flax up-block without attention: concatenates one skip state per layer
    before each ResNet block, with an optional final upsampler.

    NOTE(review): same obfuscation breakage as the sibling blocks — duplicate
    `snake_case__` field names, `setup` results bound to `_lowerCamelCase`,
    and `self.resnets` / `self.upsamplers_a` never assigned.
    """
    # architecture hyper-parameters (flax.linen dataclass-style fields)
    snake_case__ : int
    snake_case__ : int
    snake_case__ : int
    snake_case__ : float = 0.0
    snake_case__ : int = 1
    snake_case__ : bool = True
    snake_case__ : jnp.dtype = jnp.floataa
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Build the per-layer ResNet sub-modules (flax setup)."""
        _lowerCamelCase : Dict = []
        for i in range(self.num_layers ):
            # skip-connection channel count depends on which down-block state is concatenated
            _lowerCamelCase : Optional[int] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            _lowerCamelCase : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
            _lowerCamelCase : Union[str, Any] = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(__lowerCAmelCase )
        _lowerCamelCase : Tuple = resnets
        if self.add_upsample:
            _lowerCamelCase : str = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any]=True ):
        """Concatenate a skip state per layer, then apply the ResNet block."""
        for resnet in self.resnets:
            # pop res hidden states
            _lowerCamelCase : Tuple = res_hidden_states_tuple[-1]
            _lowerCamelCase : Union[str, Any] = res_hidden_states_tuple[:-1]
            _lowerCamelCase : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            _lowerCamelCase : List[Any] = resnet(__lowerCAmelCase , __lowerCAmelCase , deterministic=__lowerCAmelCase )
        if self.add_upsample:
            _lowerCamelCase : int = self.upsamplers_a(__lowerCAmelCase )
        return hidden_states
class __snake_case ( nn.Module):
    """Flax UNet mid-block: an initial ResNet, then `num_layers` alternating
    (attention, ResNet) pairs, all at a constant channel width.

    NOTE(review): same obfuscation breakage as the sibling blocks — duplicate
    `snake_case__` field names, `setup` binds to `_lowerCamelCase` while
    appending to the undefined names `attentions` / `resnets`, and
    `self.resnets` / `self.attentions` read in `__call__` are never assigned.
    """
    # architecture hyper-parameters (flax.linen dataclass-style fields)
    snake_case__ : int
    snake_case__ : float = 0.0
    snake_case__ : int = 1
    snake_case__ : int = 1
    snake_case__ : bool = False
    snake_case__ : bool = False
    snake_case__ : jnp.dtype = jnp.floataa
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Build one leading ResNet plus per-layer attention/ResNet pairs."""
        # there is always at least one resnet (runs before the first attention)
        _lowerCamelCase : List[Any] = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        _lowerCamelCase : Dict = []
        for _ in range(self.num_layers ):
            _lowerCamelCase : int = FlaxTransformeraDModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(__lowerCAmelCase )
            _lowerCamelCase : Union[str, Any] = FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(__lowerCAmelCase )
        _lowerCamelCase : Any = resnets
        _lowerCamelCase : Tuple = attentions
    def __call__( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any]=True ):
        """Apply the leading ResNet, then each (attention, ResNet) pair."""
        _lowerCamelCase : int = self.resnets[0](__lowerCAmelCase , __lowerCAmelCase )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            _lowerCamelCase : Any = attn(__lowerCAmelCase , __lowerCAmelCase , deterministic=__lowerCAmelCase )
            _lowerCamelCase : Tuple = resnet(__lowerCAmelCase , __lowerCAmelCase , deterministic=__lowerCAmelCase )
        return hidden_states
| 83 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
class __snake_case ( Generic[T]):
    """Singly-linked list node holding a payload and a `next` pointer.

    NOTE(review): the constructor binds `data` to the throwaway local
    `_lowerCamelCase` instead of `self.data`, so `__str__` (which reads
    `self.data`) raises AttributeError — the obfuscation broke this class.
    """
    def __init__( self : int , __lowerCAmelCase : T ):
        """Store the payload and initialise the `next` pointer to None."""
        _lowerCamelCase : Optional[int] = data
        # the annotation suggests this was meant to be `self.next = None`
        _lowerCamelCase : Node[T] | None = None
    def __str__( self : Optional[Any] ):
        """Render just the payload (used by the stack's `->`-joined repr)."""
        return f'''{self.data}'''
class __snake_case ( Generic[T]):
    """LIFO stack backed by a singly-linked list of nodes.

    NOTE(review): like the Node class above, the obfuscation replaced the
    `self.top` assignments with the throwaway local `_lowerCamelCase`, so
    `self.top` is never initialised or updated — push/pop cannot work as
    written.
    """
    def __init__( self : int ):
        """Start with an empty stack (top presumably meant to be None)."""
        _lowerCamelCase : Node[T] | None = None
    def __iter__( self : str ):
        """Yield payloads from top to bottom by walking the node chain."""
        _lowerCamelCase : List[str] = self.top
        while node:
            yield node.data
            _lowerCamelCase : Any = node.next
    def __str__( self : int ):
        """Render the stack top-to-bottom as `a->b->c`."""
        return "->".join([str(__lowerCAmelCase ) for item in self] )
    def __len__( self : int ):
        """Count elements by materialising the iterator (O(n))."""
        return len(tuple(iter(self ) ) )
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Return True when the stack holds no elements."""
        return self.top is None
    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : T ):
        """Push: wrap the value in a node and make it the new top."""
        _lowerCamelCase : Tuple = Node(__lowerCAmelCase )
        if not self.is_empty():
            _lowerCamelCase : Optional[int] = self.top
        _lowerCamelCase : List[str] = node
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Pop: remove and return the top payload; IndexError when empty."""
        if self.is_empty():
            raise IndexError('''pop from empty stack''' )
        assert isinstance(self.top , __lowerCAmelCase )
        _lowerCamelCase : Any = self.top
        _lowerCamelCase : Any = self.top.next
        return pop_node.data
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Peek: return the top payload without removing it; IndexError when empty."""
        if self.is_empty():
            raise IndexError('''peek from empty stack''' )
        assert self.top is not None
        return self.top.data
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Clear: drop the whole chain (presumably `self.top = None`)."""
        _lowerCamelCase : List[str] = None
if __name__ == "__main__":
    # Script entry point: run any doctest examples in this module's docstrings.
    from doctest import testmod
    testmod()
| 83 | 1 |
"""simple docstring"""
from torch import nn
def snake_case_ ( A_ : str ):
    """Map an activation-function name to the corresponding ``torch.nn`` module.

    Accepted names: ``"swish"``/``"silu"`` (-> nn.SiLU), ``"mish"``
    (-> nn.Mish) and ``"gelu"`` (-> nn.GELU).

    Raises:
        ValueError: for any other name.

    BUGFIX: the original body read an undefined name ``act_fn`` while the
    parameter was named ``A_`` (and was mis-annotated as ``int``); the alias
    below restores a working binding without changing the signature.
    """
    act_fn = A_
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'''Unsupported activation function: {act_fn}''' )
| 83 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# BUGFIX: every constant here was bound to the same throwaway name
# `lowerCAmelCase__`, while the code below reads `PATH_TO_TRANSFORMERS`,
# `spec`, `CONFIG_MAPPING`, `_re_checkpoint` and the ignore set. The real
# names are restored; the last obfuscated alias is kept alive at the end.
# All paths are set with the intent you should run this script from the root
# of the repo: python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Load the in-repo transformers package (not an installed copy).
spec = importlib.util.spec_from_file_location(
    '''transformers''',
    os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex used to find the checkpoint mentioned in the docstring of `config_class`,
# e.g. `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`.
# Raw string: `\[`, `\]`, `\.` are regex escapes, not string escapes.
_re_checkpoint = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
# Config classes exempt from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    '''CLIPConfigMixin''',
    '''DecisionTransformerConfigMixin''',
    '''EncoderDecoderConfigMixin''',
    '''RagConfigMixin''',
    '''SpeechEncoderDecoderConfigMixin''',
    '''VisionEncoderDecoderConfigMixin''',
    '''VisionTextDualEncoderConfigMixin''',
}
# Keep the old obfuscated alias pointing at the final binding for safety.
lowerCAmelCase__ = CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK
def snake_case_ ( ):
    """Verify every config class in CONFIG_MAPPING mentions at least one
    valid `[name](https://huggingface.co/name)` checkpoint in its source.

    Raises:
        ValueError: listing (sorted, newline-separated) the config classes
            that have no valid checkpoint and are not in the ignore set.

    BUGFIX: the obfuscated body bound every result to `_lowerCamelCase`
    and then read names such as `checkpoints`, `ckpt_name`, `name`, and
    even called `inspect.getsource(A_)` on an undefined name in this
    zero-argument function; the intended local bindings are restored.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name , ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
    # BUGFIX: the original called `check_config_docstrings_have_checkpoints()`,
    # which is not defined anywhere in this file (the checker was renamed to
    # `snake_case_` by the obfuscation), so running the script raised NameError.
    snake_case_()
| 83 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __snake_case ( _lowercase):
snake_case__ : List[str] = "speech_to_text_2"
snake_case__ : Dict = ["past_key_values"]
snake_case__ : Optional[Any] = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[str] , __lowerCAmelCase : Optional[Any]=1_0_0_0_0 , __lowerCAmelCase : str=6 , __lowerCAmelCase : Union[str, Any]=2_0_4_8 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : List[str]=0.0 , __lowerCAmelCase : Any=True , __lowerCAmelCase : str="relu" , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : int=2 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=1 , __lowerCAmelCase : List[str]=0 , __lowerCAmelCase : int=2 , __lowerCAmelCase : int=1_0_2_4 , **__lowerCAmelCase : List[Any] , ):
"""simple docstring"""
_lowerCamelCase : int = vocab_size
_lowerCamelCase : Dict = d_model
_lowerCamelCase : Optional[Any] = decoder_ffn_dim
_lowerCamelCase : Any = decoder_layers
_lowerCamelCase : int = decoder_attention_heads
_lowerCamelCase : Union[str, Any] = dropout
_lowerCamelCase : int = attention_dropout
_lowerCamelCase : Any = activation_dropout
_lowerCamelCase : List[Any] = activation_function
_lowerCamelCase : Optional[Any] = init_std
_lowerCamelCase : List[Any] = decoder_layerdrop
_lowerCamelCase : Any = use_cache
_lowerCamelCase : Tuple = decoder_layers
_lowerCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCamelCase : Dict = max_target_positions
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
| 83 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class __snake_case ( unittest.TestCase):
    """Empty TestCase — presumably a placeholder for fast pipeline tests
    (only the nightly GPU tests below have content); TODO confirm."""
    pass
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase):
    """Nightly GPU tests for VersatileDiffusionTextToImagePipeline:
    save/load round-trip determinism and a pinned fp16 output slice.

    NOTE(review): the obfuscation bound every result to `_lowerCamelCase`,
    so the subsequent reads (`pipe`, `generator`, `image`, `new_image`,
    `image_slice`, `expected_slice`) and the `__lowerCAmelCase` argument
    values are undefined — these tests raise NameError as written. Also the
    teardown method is named `SCREAMING_SNAKE_CASE`, not `tearDown`, so
    unittest never calls it.
    """
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Intended tearDown: free GPU memory between tests (see class note)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Save the pipeline, reload it, and check both produce the same image
        for the same seed (tolerance 1e-5 on the summed absolute diff)."""
        _lowerCamelCase : int = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : str = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Dict = torch.manual_seed(0 )
        _lowerCamelCase : List[Any] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__lowerCAmelCase )
            _lowerCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCAmelCase )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : int = generator.manual_seed(0 )
        _lowerCamelCase : List[str] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Run the fp16 pipeline for 50 steps and compare a 3x3x3 corner slice
        of the output image against pinned values (tolerance 1e-2)."""
        _lowerCamelCase : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        _lowerCamelCase : int = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
        _lowerCamelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        _lowerCamelCase : Dict = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 83 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
# BUGFIX: the helper below falls back to a module-level `global_rng`, but the
# obfuscation only bound `lowerCAmelCase__`; bind both names (old alias kept).
lowerCAmelCase__ = global_rng = random.Random()
def snake_case_ ( shape, scale=1.0, rng=None, name=None ):
    """Create a `shape[0] x shape[1]` nested list of random floats in
    `[0, scale)`.

    Args:
        shape: pair `(num_rows, num_cols)`.
        scale: multiplier applied to each uniform sample (default 1.0).
        rng: optional `random.Random`; falls back to the module `global_rng`.
        name: unused, kept for signature compatibility with similar helpers.

    BUGFIX: the original signature declared four parameters all named ``A_``
    (a SyntaxError — duplicate argument names) while the body read ``shape``,
    ``scale`` and ``rng``; the intended parameter names are restored.
    """
    if rng is None:
        rng = global_rng
    values = []
    for _ in range(shape[0] ):
        row = []
        for _ in range(shape[1] ):
            row.append(rng.random() * scale )
        values.append(row )
    return values
@require_torch
class __snake_case ( unittest.TestCase):
def __init__( self : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=7 , __lowerCAmelCase : Any=4_0_0 , __lowerCAmelCase : Dict=2_0_0_0 , __lowerCAmelCase : str=1 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Union[str, Any]=1_6_0_0_0 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : str=8_0 , __lowerCAmelCase : Tuple=1_6 , __lowerCAmelCase : Dict=6_4 , __lowerCAmelCase : Any="hann_window" , __lowerCAmelCase : str=8_0 , __lowerCAmelCase : int=7_6_0_0 , __lowerCAmelCase : Optional[int]=1E-10 , __lowerCAmelCase : str=True , ):
"""simple docstring"""
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Union[str, Any] = min_seq_length
_lowerCamelCase : List[Any] = max_seq_length
_lowerCamelCase : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase : str = feature_size
_lowerCamelCase : Optional[Any] = padding_value
_lowerCamelCase : Optional[Any] = sampling_rate
_lowerCamelCase : List[Any] = do_normalize
_lowerCamelCase : str = num_mel_bins
_lowerCamelCase : int = hop_length
_lowerCamelCase : Union[str, Any] = win_length
_lowerCamelCase : str = win_function
_lowerCamelCase : Tuple = fmin
_lowerCamelCase : Optional[int] = fmax
_lowerCamelCase : Dict = mel_floor
_lowerCamelCase : Optional[Any] = return_attention_mask
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Any=False ):
"""simple docstring"""
def _flatten(__lowerCAmelCase : Tuple ):
return list(itertools.chain(*__lowerCAmelCase ) )
if equal_length:
_lowerCamelCase : str = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_lowerCamelCase : Tuple = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCamelCase : List[str] = [np.asarray(__lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
def prepare_inputs_for_target(self, equal_length=False, numpify=False):
    """Build a batch of 2-D (frames x num_mel_bins) target inputs.

    Renamed from the garbled `SCREAMING_SNAKE_CASE`: callers in this file
    invoke it as `prepare_inputs_for_target()`. Also fixes the duplicated
    keyword parameter names and the discarded local assignments.

    Args:
        equal_length: if True, all samples have `max_seq_length` frames;
            otherwise frame counts increase from `min_seq_length`.
        numpify: convert each sample to a numpy array.
    """
    if equal_length:
        speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
    else:
        # make sure that inputs increase in size
        speech_inputs = [
            floats_list((x, self.num_mel_bins))
            for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
        ]
    if numpify:
        speech_inputs = [np.asarray(x) for x in speech_inputs]
    return speech_inputs
@require_torch
class __snake_case ( _lowercase , unittest.TestCase):
    # Test suite for SpeechTaFeatureExtractor (waveform inputs and
    # mel-spectrogram targets).
    # NOTE(review): local variables throughout this class were mechanically
    # renamed to `_lowerCamelCase`, discarding the assigned values; later
    # statements then reference the intended names (`feat_extract`,
    # `speech_inputs`, `processed`, ...), which are unresolved. Code left
    # byte-identical here; only comments/docstrings touched.

    # Feature-extractor class exercised by this suite.
    snake_case__ : Dict = SpeechTaFeatureExtractor

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Build the shared tester fixture (plays the role of setUp)."""
        _lowerCamelCase : Optional[int] = SpeechTaFeatureExtractionTester(self )

    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : List[str] ):
        """Assert the given values have ~zero mean and ~unit variance per column."""
        self.assertTrue(np.all(np.mean(__lowerCAmelCase , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(__lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Waveform extraction: list vs numpy input, batched vs not batched."""
        _lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        _lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        _lowerCamelCase : Tuple = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs]
        # Test not batched input
        _lowerCamelCase : Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        _lowerCamelCase : Any = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
        # Test batched
        _lowerCamelCase : Union[str, Any] = feat_extract(__lowerCAmelCase , return_tensors='''np''' ).input_values
        _lowerCamelCase : List[Any] = feat_extract(__lowerCAmelCase , return_tensors='''np''' ).input_values
        # NOTE(review): both loop variables are `enc_seq_a` (garbled rename);
        # the pairwise comparison below therefore compares a name to itself.
        for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
            self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Zero-mean/unit-variance normalization under several padding strategies."""
        _lowerCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        _lowerCamelCase : str = ['''longest''', '''max_length''', '''do_not_pad''']
        _lowerCamelCase : Dict = [None, 1_6_0_0, None]
        for max_length, padding in zip(__lowerCAmelCase , __lowerCAmelCase ):
            _lowerCamelCase : List[Any] = feat_extract(__lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors='''np''' )
            _lowerCamelCase : Optional[Any] = processed.input_values
            # padded region beyond each sample's true length must stay ~zero
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Same normalization checks driven by an explicit range of lengths."""
        _lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCamelCase : Optional[Any] = range(8_0_0 , 1_4_0_0 , 2_0_0 )
        _lowerCamelCase : List[Any] = [floats_list((1, x) )[0] for x in lengths]
        _lowerCamelCase : Dict = ['''longest''', '''max_length''', '''do_not_pad''']
        _lowerCamelCase : List[str] = [None, 1_6_0_0, None]
        for max_length, padding in zip(__lowerCAmelCase , __lowerCAmelCase ):
            _lowerCamelCase : List[str] = feat_extract(__lowerCAmelCase , max_length=__lowerCAmelCase , padding=__lowerCAmelCase )
            _lowerCamelCase : Union[str, Any] = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Truncation to max_length with padding='max_length'."""
        _lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCamelCase : int = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        _lowerCamelCase : List[str] = feat_extract(
            __lowerCAmelCase , truncation=__lowerCAmelCase , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
        _lowerCamelCase : List[Any] = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """padding='longest' pads to the longest sample, capped by truncation."""
        _lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        _lowerCamelCase : str = feat_extract(
            __lowerCAmelCase , truncation=__lowerCAmelCase , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        _lowerCamelCase : Tuple = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_0_0_0) )
        _lowerCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        _lowerCamelCase : Optional[Any] = feat_extract(
            __lowerCAmelCase , truncation=__lowerCAmelCase , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        _lowerCamelCase : Union[str, Any] = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_2_0_0) )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """pad() preserves float dtype for both numpy and torch return types."""
        # NOTE(review): `np.floataa` / `torch.floataa` are garbled dtype names
        # (digits replaced) — presumably float32; confirm against upstream.
        _lowerCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCamelCase : int = np.random.rand(1_0_0 ).astype(np.floataa )
        _lowerCamelCase : Any = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            _lowerCamelCase : Optional[Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            _lowerCamelCase : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def SCREAMING_SNAKE_CASE ( self : str ):
        """Target-side (mel spectrogram) extraction: shape and batching."""
        _lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        _lowerCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        _lowerCamelCase : Optional[Any] = [np.asarray(__lowerCAmelCase ) for speech_input in speech_inputs]
        # Test feature size
        _lowerCamelCase : List[Any] = feature_extractor(audio_target=__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors='''np''' ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        _lowerCamelCase : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
        _lowerCamelCase : Dict = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
        # Test batched
        _lowerCamelCase : str = feature_extractor(__lowerCAmelCase , return_tensors='''np''' ).input_values
        _lowerCamelCase : Tuple = feature_extractor(__lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
            self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        _lowerCamelCase : Tuple = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        _lowerCamelCase : Optional[int] = np.asarray(__lowerCAmelCase )
        _lowerCamelCase : Tuple = feature_extractor(__lowerCAmelCase , return_tensors='''np''' ).input_values
        _lowerCamelCase : Tuple = feature_extractor(__lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(__lowerCAmelCase , __lowerCAmelCase ):
            self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """BatchFeature wrapping of target inputs; numpy tensor conversion."""
        _lowerCamelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
        _lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
        _lowerCamelCase : str = feat_extract.model_input_names[0]
        _lowerCamelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(__lowerCAmelCase ) == len(__lowerCAmelCase ) for x, y in zip(__lowerCAmelCase , processed_features[input_name] ) ) )
        _lowerCamelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
        _lowerCamelCase : int = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            _lowerCamelCase : List[Any] = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Same BatchFeature shape check, converting straight to torch tensors."""
        _lowerCamelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__lowerCAmelCase )
        _lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
        _lowerCamelCase : List[str] = feat_extract.model_input_names[0]
        _lowerCamelCase : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
        _lowerCamelCase : Tuple = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            _lowerCamelCase : Optional[int] = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )

    @require_torch
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """pad() yields numerically equivalent results for np and pt tensors."""
        _lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
        _lowerCamelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
        _lowerCamelCase : Tuple = feat_extract.model_input_names[0]
        _lowerCamelCase : Optional[int] = BatchFeature({input_name: speech_inputs} )
        _lowerCamelCase : Tuple = feat_extract.num_mel_bins # hack!
        _lowerCamelCase : Any = feat_extract.pad(__lowerCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
        _lowerCamelCase : List[str] = feat_extract.pad(__lowerCAmelCase , padding='''longest''' , return_tensors='''pt''' )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Attention mask is returned and matches the per-sample lengths."""
        _lowerCamelCase : Optional[Any] = self.feat_extract_dict
        _lowerCamelCase : List[str] = True
        _lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**__lowerCAmelCase )
        _lowerCamelCase : str = self.feat_extract_tester.prepare_inputs_for_target()
        _lowerCamelCase : List[str] = [len(__lowerCAmelCase ) for x in speech_inputs]
        _lowerCamelCase : List[Any] = feat_extract.model_input_names[0]
        _lowerCamelCase : str = BatchFeature({input_name: speech_inputs} )
        _lowerCamelCase : Optional[Any] = feat_extract.num_mel_bins # hack!
        _lowerCamelCase : Dict = feat_extract.pad(__lowerCAmelCase , padding='''longest''' , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , __lowerCAmelCase )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Attention mask under max_length padding with truncation."""
        _lowerCamelCase : List[Any] = self.feat_extract_dict
        _lowerCamelCase : List[str] = True
        _lowerCamelCase : Union[str, Any] = self.feature_extraction_class(**__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
        _lowerCamelCase : int = [len(__lowerCAmelCase ) for x in speech_inputs]
        _lowerCamelCase : Any = feat_extract.model_input_names[0]
        _lowerCamelCase : List[Any] = BatchFeature({input_name: speech_inputs} )
        _lowerCamelCase : Optional[int] = min(__lowerCAmelCase )
        _lowerCamelCase : int = feat_extract.num_mel_bins # hack!
        _lowerCamelCase : Dict = feat_extract.pad(
            __lowerCAmelCase , padding='''max_length''' , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , __lowerCAmelCase )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )

    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Union[str, Any] ):
        """Load `num_samples` waveforms from the dummy LibriSpeech dataset."""
        from datasets import load_dataset
        _lowerCamelCase : Union[str, Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        _lowerCamelCase : Optional[Any] = ds.sort('''id''' ).select(range(__lowerCAmelCase ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Integration check of extracted waveform values against a reference slice."""
        # fmt: off
        _lowerCamelCase : Dict = torch.tensor(
            [2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
            3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
            2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
            4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
            7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
            4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
        # fmt: on
        _lowerCamelCase : Dict = self._load_datasamples(1 )
        _lowerCamelCase : List[Any] = SpeechTaFeatureExtractor()
        _lowerCamelCase : Optional[Any] = feature_extractor(__lowerCAmelCase , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
        self.assertTrue(torch.allclose(input_values[0, :3_0] , __lowerCAmelCase , atol=1E-6 ) )

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Integration check of target mel-spectrogram values against a reference slice."""
        # fmt: off
        _lowerCamelCase : Optional[Any] = torch.tensor(
            [-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
            -3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
            -3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
            -3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
        # fmt: on
        _lowerCamelCase : Dict = self._load_datasamples(1 )
        _lowerCamelCase : Optional[Any] = SpeechTaFeatureExtractor()
        _lowerCamelCase : List[Any] = feature_extractor(audio_target=__lowerCAmelCase , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
        self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , __lowerCAmelCase , atol=1E-4 ) )
| 83 |
"""Standalone onnxruntime latency benchmark.

Loads ``model.onnx``, runs one warm-up pass, then times ``max_iters`` forward
passes of a three-input (ids / mask / token-type) model and prints the average
per-run latency in milliseconds.
"""
import os
import time

import numpy as np
import onnxruntime as ort

# NOTE(review): the next three assignments rebind the same garbled name, so
# only the last value survives; upstream these were three distinct settings
# (values "1", "0", "1") — recover the original names before relying on them.
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
# Session options with all graph optimizations disabled (pure runtime timing).
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
# Provider order: try TensorRT first, then fall back to CUDA.
lowerCAmelCase__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ = ort.RunOptions()
# Fixed input shape: batch of 1, sequence length 128.
lowerCAmelCase__ = 128
lowerCAmelCase__ = 1
# NOTE(review): `np.intaa` does not exist — the dtype digits were garbled
# (presumably np.int64 for transformer input ids); confirm before running.
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
# One untimed run so lazy initialization (engine build, memory arenas)
# does not pollute the measurement below.
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = 2000
lowerCAmelCase__ = {}
# NOTE(review): `iter` shadows the builtin, and each pass rebinds the same
# garbled name, so individual outputs are discarded.
for iter in range(max_iters):
    lowerCAmelCase__ = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
# Average wall-clock latency per run, in milliseconds.
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """Return True if num/den is a non-trivial digit-cancelling fraction.

    I.e. the units digit of `num` equals the tens digit of `den`, and naively
    "cancelling" those digits (num//10) / (den%10) leaves the value unchanged.

    Fixes the garbled original: the function was named `snake_case_` (while
    its caller invokes `is_digit_cancelling`) and both parameters were named
    `A_`, which is a SyntaxError.
    """
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    """Enumerate two-digit digit-cancelling fractions as "num/den" strings.

    Fixes the garbled original: it was named `snake_case_` while its caller
    invokes `fraction_list`, and the numerator range reused the parameter
    name twice (`range(A_, A_)`), producing an empty range.
    """
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            # den % 10 != 0 excludes trivial cases like 30/50
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        # restart the denominator scan for the next numerator
        den = 10
    return solutions
def solution(digit_len: int = 2) -> int:
    """Project Euler 33: denominator of the product of all digit-cancelling
    fractions, in lowest terms.

    Fixes the garbled original: it was named `snake_case_` (shadowing the two
    sibling definitions) and constructed `Fraction` from the wrong variable.
    """
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        # multiply by den/num: the numerators cancel, leaving the answer's
        # denominator as an integer
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
| 83 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(figure_x: float, figure_y: float, max_step: int) -> float:
    """Run the Mandelbrot iteration z -> z^2 + c for c = figure_x + i*figure_y.

    Returns step / (max_step - 1): ~1.0 for points that never diverge within
    `max_step` iterations, smaller values for points that escape early.

    Fixes the garbled original: both coordinate parameters were named `A_`
    (a SyntaxError) and every update was assigned to a throwaway local, so
    the recurrence never advanced.
    """
    a = figure_x
    b = figure_y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + figure_x
        b = 2 * a * b + figure_y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4 (checked via |z|^2 > 4)
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set (distance == 1), white otherwise.

    Renamed from the colliding `snake_case_`: the rendering loop calls it as
    `get_black_and_white_rgb`.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; otherwise an HSV hue derived from the distance.

    Renamed from the colliding `snake_case_`: the rendering loop calls it as
    `get_color_coded_rgb`.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set into a PIL RGB image.

    Fixes the garbled original: every parameter was named `A_` (a
    SyntaxError) and the computed pixel colors were assigned to a throwaway
    local instead of being written into the pixel buffer.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    # NOTE(review): the image is bound to the garbled name below, so the
    # `img.show()` call at the bottom references an unresolved name — the
    # original presumably assigned to `img`.
    lowerCAmelCase__ = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 83 | 1 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    '''files''', [
        ['''full:README.md''', '''dataset_infos.json'''],
        ['''empty:README.md''', '''dataset_infos.json'''],
        ['''dataset_infos.json'''],
        ['''full:README.md'''],
    ], )
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict.from_directory reads dataset_size from README.md
    and/or the legacy dataset_infos.json.

    Fixes the garbled original: both parameters were named `A_` (a
    SyntaxError), the body referenced the unbound fixtures, and the function
    name `snake_case_` collided with every other test in this module.
    """
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''')
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''') as f:
            f.write('''---\ndataset_info:\n dataset_size: 42\n---''')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''') as f:
            f.write('''''')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''', '''w''') as f:
            f.write('''{"default": {"dataset_size": 42}}''')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    '''dataset_info''', [
        DatasetInfo(),
        DatasetInfo(
            description='''foo''', features=Features({'''a''': Value('''int32''')}), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ),
    ], )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    """Round-trip a DatasetInfo through write_to_directory / from_directory.

    Fixes the garbled original: duplicated `A_` parameter names (SyntaxError),
    unbound fixture references, and the colliding `snake_case_` name.
    """
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    # write_to_directory must have produced the JSON sidecar file
    assert os.path.exists(os.path.join(tmp_path, '''dataset_info.json'''))
def test_dataset_info_to_yaml_dict():
    """_to_yaml_dict keeps exactly the whitelisted keys and survives a YAML
    dump/load round trip.

    Renamed from `snake_case_`, which collided with the other tests in this
    module (each redefinition shadowed the previous one, so pytest could not
    collect them); throwaway locals given real names.
    """
    dataset_info = DatasetInfo(
        description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    """A default DatasetInfo serializes to an empty YAML dict.

    Renamed from the colliding `snake_case_`; throwaway locals given real names.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    '''dataset_infos_dict''', [
        DatasetInfosDict(),
        DatasetInfosDict({'''default''': DatasetInfo()}),
        DatasetInfosDict({'''my_config_name''': DatasetInfo()}),
        DatasetInfosDict(
            {
                '''default''': DatasetInfo(
                    description='''foo''', features=Features({'''a''': Value('''int32''')}), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, )
            }),
        DatasetInfosDict(
            {
                '''v1''': DatasetInfo(dataset_size=42),
                '''v2''': DatasetInfo(dataset_size=13_37),
            }),
    ], )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    """Round-trip a DatasetInfosDict through write_to_directory / from_directory.

    Fixes the garbled original: duplicated `A_` parameter names (SyntaxError),
    unbound fixture references, discarded local assignments, and the
    colliding `snake_case_` name.
    """
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, '''README.md'''))
| 83 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line with padding/truncation to `max_length`.

    Fixes the garbled original: several parameters shared the name `A_`
    (a SyntaxError) and the BART-specific `add_prefix_space` check was
    reduced to `isinstance(A_, A_)`.
    """
    # BART tokenizers need add_prefix_space unless the line already starts
    # with a space.
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(''' ''') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding='''max_length''' if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns of `input_ids` (and `attention_mask`) that are entirely
    padding, so batches are no wider than their longest real sequence.

    Renamed from the colliding `snake_case_`: the dataset collator in this
    file calls it as `trim_batch`. Also fixes the duplicated `A_` parameter
    names of the garbled original.
    """
    # keep a column if any row has a non-pad token in it
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __snake_case ( _lowercase):
    # Line-by-line seq2seq dataset: reads parallel "<type_path>.source" /
    # "<type_path>.target" files and tokenizes one example per line.
    # NOTE(review): the constructor repeats the parameter name
    # `__lowerCAmelCase` (a SyntaxError) and locals throughout are assigned
    # to the throwaway `_lowerCamelCase`, so later references (self.src_file,
    # source_line, source_ids, ...) are unresolved — mechanical renaming
    # damage. Code left byte-identical; only comments/docstrings touched.

    def __init__( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple="train" , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Union[str, Any]="" , ):
        """Record file paths, per-line lengths and tokenization limits for the split."""
        super().__init__()
        _lowerCamelCase : Optional[int] = Path(__lowerCAmelCase ).joinpath(type_path + '''.source''' )
        _lowerCamelCase : List[str] = Path(__lowerCAmelCase ).joinpath(type_path + '''.target''' )
        _lowerCamelCase : List[Any] = self.get_char_lens(self.src_file )
        _lowerCamelCase : Optional[int] = max_source_length
        _lowerCamelCase : Optional[Any] = max_target_length
        assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
        _lowerCamelCase : List[Any] = tokenizer
        _lowerCamelCase : List[Any] = prefix
        if n_obs is not None:
            # limit the dataset to the first n_obs examples
            _lowerCamelCase : List[str] = self.src_lens[:n_obs]
        _lowerCamelCase : int = src_lang
        _lowerCamelCase : Union[str, Any] = tgt_lang

    def __len__( self : int ):
        """Number of examples (one per line of the source file)."""
        return len(self.src_lens )

    def __getitem__( self : Dict , __lowerCAmelCase : Optional[Any] ):
        """Tokenize the source/target pair at `index` (linecache is 1-based)."""
        _lowerCamelCase : str = index + 1 # linecache starts at 1
        _lowerCamelCase : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) , __lowerCAmelCase ).rstrip('''\n''' )
        _lowerCamelCase : Optional[Any] = linecache.getline(str(self.tgt_file ) , __lowerCAmelCase ).rstrip('''\n''' )
        assert source_line, f'''empty source line for index {index}'''
        assert tgt_line, f'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , __lowerCAmelCase ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers wrap a question encoder and a generator; pick the
        # appropriate sub-tokenizer for each side.
        _lowerCamelCase : Optional[int] = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        )
        _lowerCamelCase : Union[str, Any] = self.tokenizer.generator if isinstance(self.tokenizer , __lowerCAmelCase ) else self.tokenizer
        _lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_source_length , '''right''' )
        _lowerCamelCase : List[str] = encode_line(__lowerCAmelCase , __lowerCAmelCase , self.max_target_length , '''right''' )
        _lowerCamelCase : Optional[Any] = source_inputs['''input_ids'''].squeeze()
        _lowerCamelCase : Union[str, Any] = target_inputs['''input_ids'''].squeeze()
        _lowerCamelCase : Any = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : str ):
        """Character length of every line in the given file."""
        return [len(__lowerCAmelCase ) for x in Path(__lowerCAmelCase ).open().readlines()]

    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Any ):
        """Collate a list of examples into a batch dict with pad columns trimmed."""
        _lowerCamelCase : List[Any] = torch.stack([x['''input_ids'''] for x in batch] )
        _lowerCamelCase : Tuple = torch.stack([x['''attention_mask'''] for x in batch] )
        _lowerCamelCase : Union[str, Any] = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        _lowerCamelCase : Tuple = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        _lowerCamelCase : Tuple = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , __lowerCAmelCase )
            else self.tokenizer.pad_token_id
        )
        _lowerCamelCase : Union[str, Any] = trim_batch(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase , _lowerCamelCase : List[str] = trim_batch(__lowerCAmelCase , __lowerCAmelCase , attention_mask=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
# Module-level logger used by the helper functions below.
lowerCAmelCase__ = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    """Flatten one level of nesting into a single list.

    Renamed from `snake_case_`, which collided with every other helper in
    this module (only the last redefinition survived).
    """
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git repository information to `folder_path`/git_log.json.

    Renamed from the colliding `snake_case_`; the garbled original also
    passed unbound names to the helpers.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, '''git_log.json'''))
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Serialize `content` as JSON to `path`.

    Fixes the duplicated `A_` parameter names of the garbled original
    (a SyntaxError) and restores the intended names.
    """
    with open(path, '''w''') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    """Deserialize JSON content from `path`.

    Renamed from the colliding `snake_case_` and given a real parameter name.
    """
    with open(path) as f:
        return json.load(f)
def get_git_info():
    """Return repo id/sha/branch plus hostname, for experiment logging.

    Fixes the garbled original, which passed the undefined module-level name
    `A_` as `search_parent_directories` and as the repo-id source; the intent
    (searching parent directories, stringifying the repo) is restored.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        '''repo_id''': str(repo),
        '''repo_sha''': str(repo.head.object.hexsha),
        '''repo_branch''': str(repo.active_branch),
        '''hostname''': str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """`list(map(f, x))` convenience wrapper.

    Fixes the duplicated `A_` parameter names (SyntaxError) and the colliding
    `snake_case_` function name.
    """
    return list(map(f, x))
def pickle_save(obj, path):
    """Pickle `obj` to `path`.

    Fixes the duplicated `A_` parameter names (SyntaxError) and the colliding
    `snake_case_` function name.
    """
    with open(path, '''wb''') as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace
    (SQuAD-style answer normalization).

    Fixes the garbled original: the inner helpers took parameters named `A_`
    while their bodies read `text`, and the outer name `snake_case_` collided
    with every other helper in this module.
    """

    def remove_articles(text):
        return re.sub(r'''\b(a|an|the)\b''', ''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    """Token-level F1 between a prediction and a ground-truth answer,
    computed on normalized tokens (SQuAD-style).

    Fixes the duplicated `A_` parameter names (SyntaxError), the discarded
    local assignments, and the colliding `snake_case_` function name.
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    # multiset intersection: per-token overlap counts
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction, ground_truth):
    """True iff prediction and ground truth are identical after normalization.

    Fixes the duplicated `A_` parameter names (SyntaxError) and the colliding
    `snake_case_` function name.
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    """Average exact-match score over paired output/reference lines.

    Fixes the duplicated `A_` parameter names (SyntaxError), the discarded
    accumulator assignment, and the colliding `snake_case_` function name.
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    """True iff the model prefix denotes a RAG model (starts with "rag").

    Renamed from the colliding `snake_case_` and given a real parameter name
    (the garbled original's body read `model_prefix`, which was unbound).
    """
    return model_prefix.startswith('''rag''')
def snake_case_ ( extra_params, hparams, config ):
    """Move each truthy attribute in ``extra_params`` from ``hparams`` onto ``config``.

    For every name ``p`` in ``extra_params`` that is set on ``hparams``:
    copy it onto ``config`` under ``p`` (or its known equivalent name) and
    delete it from ``hparams``; if ``config`` has neither name, log and drop it.
    Returns the (mutated) ``hparams, config`` pair.

    The original signature duplicated the placeholder parameter name (a
    SyntaxError) and bound all locals to one obfuscated name while reading
    ``equivalent_param``/``set_p``; the names were reconstructed from the body.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                # `logger` is expected to be defined at module level (outside this view).
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p) )
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 83 | 1 |
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursive rod cutting: best revenue for a rod of length ``n``.

    ``prices[i - 1]`` is the price of a piece of length ``i``.  Renamed from an
    obfuscated placeholder — the body already recurses via
    ``naive_cut_rod_recursive`` and ``main()`` below calls it by this name.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        # Best of cutting a first piece of every possible length i.
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue
def top_down_cut_rod(n: int, prices: list):
    """Memoised (top-down) rod cutting: best revenue for a rod of length ``n``.

    Allocates the memo table (index 0..n, initialised to -inf) and delegates to
    the recursive helper.  Renamed from an obfuscated placeholder — ``main()``
    below calls it by this name.
    """
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def snake_case_ ( A_ : int, A_ : list, A_ : list ):
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_lowerCamelCase : Tuple = float('''-inf''' )
for i in range(1, n + 1 ):
_lowerCamelCase : int = max(
A_, prices[i - 1] + _top_down_cut_rod_recursive(n - i, A_, A_ ), )
_lowerCamelCase : int = max_revenue
return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) rod cutting: best revenue for a rod of length ``n``.

    Renamed from an obfuscated placeholder — ``main()`` below calls it by this
    name; locals were restored (``max_revenue_i`` was read but bound to an
    obfuscated name in the original).
    """
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]
def snake_case_ ( A_ : int, A_ : list ):
'''simple docstring'''
if n < 0:
_lowerCamelCase : List[Any] = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(A_ )
if n > len(A_ ):
_lowerCamelCase : Union[str, Any] = (
'''Each integral piece of rod must have a corresponding price. '''
F'''Got n = {n} but length of prices = {len(A_ )}'''
)
raise ValueError(A_ )
def main():
    """Smoke-test all three rod-cutting implementations against a known answer.

    Renamed from an obfuscated placeholder — the ``__main__`` guard below calls
    ``main()``; the locals were restored (the original read names it never
    assigned).
    """
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 83 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): the assignment below rebinds the SAME obfuscated name, clobbering the
# logger just created — upstream these are two distinct names (`logger` and the
# pretrained-config archive map); verify against the original module.
lowerCAmelCase__ = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class __snake_case ( PretrainedConfig):
    """Configuration class for CamemBERT models (a RoBERTa variant).

    NOTE(review): the base class was an undefined name ``_lowercase``; restored to
    the ``PretrainedConfig`` imported (and otherwise unused) at the top of this
    module.  The original ``__init__`` reused one placeholder name for every
    parameter (a SyntaxError) while the body read the real names — the signature
    was reconstructed from the attribute assignments and the defaults.
    """

    # Key used by the transformers auto-class machinery.
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( OnnxConfig):
    """ONNX export configuration for CamemBERT.

    NOTE(review): the base class was an undefined ``_lowercase``; restored to the
    ``OnnxConfig`` imported at the top of this module.  The property was renamed
    from an obfuscated placeholder to ``inputs`` (the accessor ``OnnxConfig``
    subclasses are expected to expose — verify against the upstream module), and
    the local was restored: the original bound the axis dict to an obfuscated
    name but returned the undefined ``dynamic_axis``.
    """

    @property
    def inputs(self):
        """Mapping from model input names to their dynamic ONNX axes.

        ``self.task`` is provided by the ``OnnxConfig`` base class.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 83 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): the map below rebinds the SAME obfuscated name, clobbering the
# logger — upstream these are `logger` and the pretrained-config archive map.
lowerCAmelCase__ = {
    '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __snake_case ( PretrainedConfig):
    """Configuration class for GPT-NeoX models.

    NOTE(review): the base class was an undefined ``_lowercase``; restored to the
    ``PretrainedConfig`` imported at the top of this module.  The original
    ``__init__`` reused one placeholder name for every parameter (a SyntaxError)
    and the validator read locals it never assigned — signature and locals were
    reconstructed from the attribute assignments, the defaults, and the
    ``self._rope_scaling_validation()`` call already present in the body.
    """

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' )

    def _rope_scaling_validation(self):
        """Validate ``self.rope_scaling``: None, or a 2-key dict with a known "type" and a float "factor" > 1."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('''type''', None)
        rope_scaling_factor = self.rope_scaling.get('''factor''', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 83 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# The class below references these constants by their original names
# (HIGHLIGHT_MESSAGE_PRE, TO_HIGHLIGHT, TO_CONVERT); the obfuscation had rebound
# them all to a single name, leaving every reference undefined.
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

# Identifiers whose presence in a converted line needs manual review.
TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

# (pattern, replacement) pairs applied with re.sub while converting a
# TensorFlow Datasets script to a HuggingFace Datasets script.
# Order is important here for some replacements.
# NOTE(review): the obfuscated replacements contained raw-string ``\'`` escapes,
# which re.sub rejects as "bad escape"; restored plain quotes.
TO_CONVERT = [
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
def snake_case_ ( A_ : Namespace ):
    """Factory wired into the CLI subparser: build the convert command from parsed args.

    NOTE(review): the body reads ``args`` but the parameter is named ``A_``, and
    ``ConvertCommand`` is not defined in this file (the class below is named
    ``__snake_case``) — mechanical renaming appears to have broken both
    references; verify against the upstream `datasets` CLI source.
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory )
class __snake_case ( _lowercase):
    # CLI command that converts TensorFlow Datasets dataset scripts into
    # HuggingFace Datasets scripts by line-wise regex rewriting.
    # NOTE(review): obfuscation broke many names in this class — locals are
    # assigned to `_lowerCamelCase` but read under their original names
    # (train_parser, abs_tfds_path, utils_files, out_line, ...), the base class
    # `_lowercase` is undefined, and the module constants it reads
    # (HIGHLIGHT_MESSAGE_PRE, TO_HIGHLIGHT, TO_CONVERT) may not exist under
    # those names.  Code left byte-identical; verify against upstream.
    @staticmethod
    def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : ArgumentParser ):
        """Register the `convert` subcommand and its --tfds_path / --datasets_directory arguments."""
        _lowerCamelCase : List[str] = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=__lowerCAmelCase )
    def __init__( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , *__lowerCAmelCase : int ):
        """Store the source tfds path and destination datasets directory; create the CLI logger."""
        _lowerCamelCase : List[str] = get_logger('''datasets-cli/converting''' )
        _lowerCamelCase : int = tfds_path
        _lowerCamelCase : Dict = datasets_directory
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Convert every eligible .py file under the tfds path, writing rewritten scripts
        into per-dataset output directories and copying shared utility files alongside
        the builders that import them.  Files that mention fragile constructs are
        flagged for manual update."""
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : Union[str, Any] = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            _lowerCamelCase : Dict = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        _lowerCamelCase : int = os.path.abspath(self._datasets_directory )
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        _lowerCamelCase : str = []
        _lowerCamelCase : Union[str, Any] = []
        _lowerCamelCase : Union[str, Any] = {}
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : List[str] = os.listdir(__lowerCAmelCase )
        else:
            _lowerCamelCase : Optional[Any] = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''' )
            _lowerCamelCase : Union[str, Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Optional[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            if not os.path.isfile(__lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(__lowerCAmelCase , encoding='''utf-8''' ) as f:
                _lowerCamelCase : Tuple = f.readlines()
            _lowerCamelCase : Optional[int] = []
            _lowerCamelCase : Union[str, Any] = False
            _lowerCamelCase : int = False
            _lowerCamelCase : Tuple = []
            for line in lines:
                _lowerCamelCase : Optional[int] = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    _lowerCamelCase : Union[str, Any] = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    _lowerCamelCase : List[str] = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    _lowerCamelCase : str = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    _lowerCamelCase : Union[str, Any] = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    _lowerCamelCase : Dict = True
                    _lowerCamelCase : Optional[int] = list(filter(lambda __lowerCAmelCase : e in out_line , __lowerCAmelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCAmelCase ) + '''\n''' )
                    out_lines.append(__lowerCAmelCase )
                    out_lines.append(__lowerCAmelCase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        _lowerCamelCase : str = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        _lowerCamelCase : Dict = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , __lowerCAmelCase )
                        tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                        _lowerCamelCase : Union[str, Any] = '''from . import ''' + match.group(1 )
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f'''Error converting {out_line.strip()}''' )
                    if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                        _lowerCamelCase : Any = True
                    out_lines.append(__lowerCAmelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                _lowerCamelCase : Union[str, Any] = f_name.replace('''.py''' , '''''' )
                _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
                self._logger.info(f'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(__lowerCAmelCase )
            if needs_manual_update:
                with_manual_update.append(__lowerCAmelCase )
            with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(__lowerCAmelCase )
            self._logger.info(f'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                _lowerCamelCase : Optional[int] = os.path.basename(__lowerCAmelCase )
                _lowerCamelCase : Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(__lowerCAmelCase , __lowerCAmelCase )
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 83 | 1 |
"""simple docstring"""
def equation(x: float) -> float:
    """The function whose root is bracketed below: f(x) = 10 - x**2.

    Renamed from an obfuscated placeholder — the bisection routine calls it as
    ``equation``; the parameter was restored (the body read an undefined ``x``).
    """
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    """Find a root of ``equation`` inside [a, b] by bisection (interval tolerance 0.01).

    Raises ValueError when equation(a) and equation(b) have the same sign, i.e.
    the interval does not bracket a root.  Renamed from an obfuscated
    placeholder — the ``__main__`` guard below calls ``bisection``; the
    original signature duplicated the parameter name (a SyntaxError).
    """
    if equation(a) * equation(b) >= 0:
        raise ValueError('''Wrong space!''' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `bisection` is not defined under that name in this file
    # (the defs above were renamed to a placeholder by obfuscation) — verify.
    print(bisection(-2, 5))
    print(bisection(0, 6))
| 83 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """0/1 knapsack by plain recursion: best value using items[index:] within ``max_weight``.

    Renamed from an obfuscated placeholder — the body already recurses via
    ``knapsack``; the original signature reused one placeholder name for all
    five parameters (a SyntaxError), so the names were reconstructed from the
    body (list, list, int, int, int).
    """
    # All items considered: nothing more to add.
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it fits.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 83 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __snake_case ( unittest.TestCase):
    # NOTE(review): obfuscation broke the locals in this test — results are bound
    # to `_lowerCamelCase` but read as `all_root_files`/`files`, and
    # `__lowerCAmelCase` is passed as a value though it is undefined here.
    # Code left byte-identical; verify against the upstream diffusers test.
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Download a tiny Flax pipeline and assert no PyTorch ``.bin`` weight files were fetched."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _lowerCamelCase : Tuple = FlaxDiffusionPipeline.from_pretrained(
                '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowerCAmelCase , cache_dir=__lowerCAmelCase )
            _lowerCamelCase : Union[str, Any] = [t[-1] for t in os.walk(os.path.join(__lowerCAmelCase , os.listdir(__lowerCAmelCase )[0] , '''snapshots''' ) )]
            _lowerCamelCase : int = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class __snake_case ( unittest.TestCase):
    # Slow, hardware-dependent integration tests for the Flax Stable Diffusion
    # pipeline (tiny pipe, fp32/bf16 revisions, DDIM scheduler, memory-efficient
    # attention).  NOTE(review): obfuscation broke the locals throughout —
    # values are bound to `_lowerCamelCase` but read back as
    # `pipeline`/`params`/`prompt`/`images`/... and `__lowerCAmelCase` is used
    # as an (undefined) argument.  Code left byte-identical; verify against the
    # upstream diffusers test module.
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Run the tiny Flax SD pipeline sharded across devices; check 64x64 output and (on 8 devices) reference sums."""
        _lowerCamelCase , _lowerCamelCase : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowerCAmelCase )
        _lowerCamelCase : List[Any] = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        _lowerCamelCase : Optional[Any] = jax.random.PRNGKey(0 )
        _lowerCamelCase : Optional[int] = 4
        _lowerCamelCase : str = jax.device_count()
        _lowerCamelCase : Tuple = num_samples * [prompt]
        _lowerCamelCase : str = pipeline.prepare_inputs(__lowerCAmelCase )
        # shard inputs and rng
        _lowerCamelCase : int = replicate(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : List[str] = shard(__lowerCAmelCase )
        _lowerCamelCase : int = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
        assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
            assert np.abs(np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
        _lowerCamelCase : List[str] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(__lowerCAmelCase ) == num_samples
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Full v1-4 pipeline, `flax` (fp32) revision: 512x512 output and reference sums on 8 devices."""
        _lowerCamelCase , _lowerCamelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__lowerCAmelCase )
        _lowerCamelCase : Any = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        _lowerCamelCase : Optional[Any] = jax.random.PRNGKey(0 )
        _lowerCamelCase : int = 5_0
        _lowerCamelCase : List[str] = jax.device_count()
        _lowerCamelCase : int = num_samples * [prompt]
        _lowerCamelCase : Optional[Any] = pipeline.prepare_inputs(__lowerCAmelCase )
        # shard inputs and rng
        _lowerCamelCase : str = replicate(__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = shard(__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
        assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
            assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Full v1-4 pipeline, `bf16` revision with explicit safety_checker kwarg: reference sums on 8 devices."""
        _lowerCamelCase , _lowerCamelCase : int = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase )
        _lowerCamelCase : Any = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        _lowerCamelCase : List[str] = jax.random.PRNGKey(0 )
        _lowerCamelCase : Optional[int] = 5_0
        _lowerCamelCase : List[str] = jax.device_count()
        _lowerCamelCase : Optional[Any] = num_samples * [prompt]
        _lowerCamelCase : List[str] = pipeline.prepare_inputs(__lowerCAmelCase )
        # shard inputs and rng
        _lowerCamelCase : int = replicate(__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : Tuple = shard(__lowerCAmelCase )
        _lowerCamelCase : List[Any] = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
        assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
            assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Full v1-4 pipeline, `bf16` revision with the default safety checker: same reference sums as above."""
        _lowerCamelCase , _lowerCamelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
        _lowerCamelCase : List[Any] = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        _lowerCamelCase : List[Any] = jax.random.PRNGKey(0 )
        _lowerCamelCase : Union[str, Any] = 5_0
        _lowerCamelCase : int = jax.device_count()
        _lowerCamelCase : int = num_samples * [prompt]
        _lowerCamelCase : str = pipeline.prepare_inputs(__lowerCAmelCase )
        # shard inputs and rng
        _lowerCamelCase : List[str] = replicate(__lowerCAmelCase )
        _lowerCamelCase : str = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : List[Any] = shard(__lowerCAmelCase )
        _lowerCamelCase : int = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
        assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
            assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """v1-4 bf16 pipeline with an explicitly constructed Flax DDIM scheduler; reference sums on 8 devices."""
        _lowerCamelCase : int = FlaxDDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , )
        _lowerCamelCase , _lowerCamelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase , )
        _lowerCamelCase : Union[str, Any] = scheduler.create_state()
        _lowerCamelCase : Optional[int] = scheduler_state
        _lowerCamelCase : str = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        _lowerCamelCase : List[str] = jax.random.PRNGKey(0 )
        _lowerCamelCase : Optional[Any] = 5_0
        _lowerCamelCase : Optional[int] = jax.device_count()
        _lowerCamelCase : Any = num_samples * [prompt]
        _lowerCamelCase : int = pipeline.prepare_inputs(__lowerCAmelCase )
        # shard inputs and rng
        _lowerCamelCase : Optional[Any] = replicate(__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = jax.random.split(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : Tuple = shard(__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
        assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
            assert np.abs((np.abs(__lowerCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Compare standard vs memory-efficient attention: output slices should match within tolerance."""
        _lowerCamelCase : List[str] = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        _lowerCamelCase : List[Any] = jax.device_count()
        _lowerCamelCase : Optional[Any] = num_samples * [prompt]
        _lowerCamelCase : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , __lowerCAmelCase )
        _lowerCamelCase , _lowerCamelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase , )
        _lowerCamelCase : List[str] = replicate(__lowerCAmelCase )
        _lowerCamelCase : List[str] = pipeline.prepare_inputs(__lowerCAmelCase )
        _lowerCamelCase : List[str] = shard(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
        assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        _lowerCamelCase : Union[str, Any] = images[2, 0, 2_5_6, 1_0:1_7, 1]
        # With memory efficient attention
        _lowerCamelCase , _lowerCamelCase : str = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowerCAmelCase , use_memory_efficient_attention=__lowerCAmelCase , )
        _lowerCamelCase : List[Any] = replicate(__lowerCAmelCase )
        _lowerCamelCase : Any = pipeline.prepare_inputs(__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = shard(__lowerCAmelCase )
        _lowerCamelCase : List[Any] = pipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , jit=__lowerCAmelCase ).images
        assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        _lowerCamelCase : List[Any] = images[2, 0, 2_5_6, 1_0:1_7, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1E-2
| 83 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=_lowercase):
    # Placeholder object emitted when the `transformers`, `torch` or `note_seq`
    # backends are unavailable: any attempt to construct or use it raises an
    # informative ImportError via `requires_backends` instead of an opaque
    # NameError at import time.
    # Backends this dummy stands in for (read by the metaclass / requires_backends).
    snake_case__ : Optional[Any] = ["transformers", "torch", "note_seq"]
    def __init__( self : Union[str, Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ):
        """Raise ImportError naming the missing backends."""
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : List[Any] , *__lowerCAmelCase : str , **__lowerCAmelCase : Union[str, Any] ):
        """Raise ImportError naming the missing backends."""
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : int , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Optional[Any] ):
        """Raise ImportError naming the missing backends."""
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 83 | 1 |
"""simple docstring"""
# Character -> Morse mapping.  Restored the original constant names: the
# encrypt/decrypt functions below reference MORSE_CODE_DICT and REVERSE_DICT,
# which the obfuscation had collapsed into a single rebound placeholder.
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Encode ``message`` to space-separated Morse (upper-cased first).

    Renamed from an obfuscated placeholder — ``main()`` below calls ``encrypt``.
    Raises KeyError for characters absent from MORSE_CODE_DICT.
    """
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    """Decode space-separated Morse back to text (upper-case).

    Renamed from an obfuscated placeholder — ``main()`` below calls ``decrypt``.
    Raises KeyError for unknown Morse tokens.
    """
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    """Demo: print a message, its Morse encoding, and the round-tripped decoding.

    Renamed from an obfuscated placeholder — the ``__main__`` guard calls
    ``main()``; the local was restored (the original read a name it never
    assigned).
    """
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
| 83 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase):
    # Slow integration test for the Flax MT5 conditional-generation model.
    # NOTE(review): obfuscation broke the locals — values are bound to
    # `_lowerCamelCase` but read back as `model`/`tokenizer`/`logits`/`loss`/
    # `labels`, and `__lowerCAmelCase` is used as an (undefined) argument.
    # Code left byte-identical; verify against the upstream transformers test.
    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Check google/mt5-small's summed cross-entropy against a hard-coded reference score."""
        _lowerCamelCase : Any = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        _lowerCamelCase : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        _lowerCamelCase : Union[str, Any] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        _lowerCamelCase : Optional[int] = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        _lowerCamelCase : List[Any] = shift_tokens_right(__lowerCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
        _lowerCamelCase : int = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
        _lowerCamelCase : Optional[Any] = optax.softmax_cross_entropy(__lowerCAmelCase , onehot(__lowerCAmelCase , logits.shape[-1] ) ).mean()
        _lowerCamelCase : Dict = -(labels.shape[-1] * loss.item())
        _lowerCamelCase : Dict = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 83 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __snake_case ( _lowercase):
    """Configuration for the Donut Swin vision encoder (model type ``donut-swin``).

    Fix: the original ``__init__`` declared every parameter as ``__lowerCAmelCase``
    (duplicate argument names are a SyntaxError) while the body read the real
    names, and the two class attributes were both named ``snake_case__`` so the
    second shadowed the first. Canonical names are restored.
    """

    model_type = "donut-swin"
    # Map config attribute aliases used by the base-config machinery.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=2_2_4,
        patch_size=4,
        num_channels=3,
        embed_dim=9_6,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 1_2, 2_4],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
| 83 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    '''files''', [
        ['''full:README.md''', '''dataset_infos.json'''],
        ['''empty:README.md''', '''dataset_infos.json'''],
        ['''dataset_infos.json'''],
        ['''full:README.md'''],
    ], )
def snake_case_ ( files, tmp_path_factory ):
    """DatasetInfosDict.from_directory must read size info from README.md and/or
    the legacy dataset_infos.json, for every combination of present files.

    Fix: duplicate ``A_`` parameters (SyntaxError) and the unbound locals
    ``dataset_infos_dir``/``dataset_infos`` are restored; the second parameter
    must be named ``tmp_path_factory`` for pytest fixture injection.
    """
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
            f.write('''---\ndataset_info:\n  dataset_size: 42\n---''' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
            f.write('''''' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''', '''w''' ) as f:
            f.write('''{"default": {"dataset_size": 42}}''' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    '''dataset_info''', [
        DatasetInfo(),
        DatasetInfo(
            description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ),
    ], )
def snake_case_ ( tmp_path, dataset_info: DatasetInfo ):
    """A DatasetInfo written to a directory must reload equal to the original.

    Fix: duplicate ``A_`` parameters (SyntaxError) and unbound locals restored.
    """
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, '''dataset_info.json''' ) )
def snake_case_ ( ):
    """A fully-populated DatasetInfo must round-trip through its YAML dict form.

    Fix: the unbound locals ``dataset_info``, ``dataset_info_yaml_dict`` and
    ``reloaded`` are restored.
    """
    dataset_info = DatasetInfo(
        description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # YAML-serializable base types only
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def snake_case_ ( ):
    """An empty DatasetInfo must serialize to an empty YAML dict.

    Fix: unbound locals restored.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    '''dataset_infos_dict''', [
        DatasetInfosDict(),
        DatasetInfosDict({'''default''': DatasetInfo()} ),
        DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
        DatasetInfosDict(
            {
                '''default''': DatasetInfo(
                    description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, )
            } ),
        DatasetInfosDict(
            {
                '''v1''': DatasetInfo(dataset_size=42 ),
                '''v2''': DatasetInfo(dataset_size=13_37 ),
            } ),
    ], )
def snake_case_ ( tmp_path, dataset_infos_dict: DatasetInfosDict ):
    """A DatasetInfosDict written to a directory must reload (up to YAML-representable
    fields) equal to the original.

    Fix: duplicate ``A_`` parameters (SyntaxError) and unbound locals restored.
    """
    dataset_infos_dir = str(tmp_path )
    dataset_infos_dict.write_to_directory(dataset_infos_dir )
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, '''README.md''' ) )
| 83 | 1 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def snake_case_ ( A_ : int ):
    """Seed Python's, NumPy's and PyTorch's RNGs (CPU and all CUDA devices) with ``A_``.

    Safe to call even when CUDA is not available.
    """
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(A_ )
class __snake_case :
    """Exponential Moving Average (EMA) of a set of model parameters.

    Maintains ``shadow_params`` — a decayed running average of the tracked
    parameters — with an optional warmup schedule, plus (de)serialization via
    ``state_dict``/``load_state_dict`` and hub-style save/load when ``model_cls``
    and ``model_config`` are provided.

    Fix: the original declared every parameter as ``__lowerCAmelCase`` (duplicate
    argument names are a SyntaxError), read never-bound locals, and named every
    method ``SCREAMING_SNAKE_CASE`` so later defs shadowed earlier ones while
    internal calls (``self.get_decay``, ``self.copy_to``, ``self.state_dict``, …)
    resolved to nothing. Canonical names are restored; ``is_deepspeed_zeroa_enabled``
    is corrected to the real transformers API name ``is_deepspeed_zero3_enabled``.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.99_99,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module ):
            deprecation_message = (
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''', '''1.0.0''', deprecation_message, standard_warn=False, )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get('''max_value''', None ) is not None:
            deprecation_message = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
            deprecate('''max_value''', '''1.0.0''', deprecation_message, standard_warn=False )
            decay = kwargs['''max_value''']
        if kwargs.get('''min_value''', None ) is not None:
            deprecation_message = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
            deprecate('''min_value''', '''1.0.0''', deprecation_message, standard_warn=False )
            min_decay = kwargs['''min_value''']
        parameters = list(parameters )
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get('''device''', None ) is not None:
            deprecation_message = '''The `device` argument is deprecated. Please use `to` instead.'''
            deprecate('''device''', '''1.0.0''', deprecation_message, standard_warn=False )
            self.to(device=kwargs['''device'''] )
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls):
        """Load an EMA wrapper from a saved model directory/checkpoint."""
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True )
        model = model_cls.from_pretrained(path )
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config )
        ema_model.load_state_dict(ema_kwargs )
        return ema_model

    def save_pretrained(self, path):
        """Save the EMA weights plus EMA hyper-parameters via the wrapped model class."""
        if self.model_cls is None:
            raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
        if self.model_config is None:
            raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
        model = self.model_cls.from_config(self.model_config )
        state_dict = self.state_dict()
        # shadow_params are copied into the model itself, not stored in its config
        state_dict.pop('''shadow_params''', None )
        model.register_to_config(**state_dict )
        self.copy_to(model.parameters() )
        model.save_pretrained(path )

    def get_decay(self, optimization_step: int):
        """Compute the decay factor for the exponential moving average at a step."""
        step = max(0, optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (1_0 + step)
        cur_decay_value = min(cur_decay_value, self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay )
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        """Update the shadow parameters towards `parameters` by one EMA step."""
        if isinstance(parameters, torch.nn.Module ):
            deprecation_message = (
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''', '''1.0.0''', deprecation_message, standard_warn=False, )
            parameters = parameters.parameters()
        parameters = list(parameters )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step )
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                # NOTE(review): GatheredParameters is assigned and then *called* via
                # `context_manager()` below, mirroring the upstream code — confirm
                # this is the intended usage under ZeRO-3.
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None )
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(param )

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]):
        """Copy the shadow (averaged) parameters into the given parameters in place."""
        parameters = list(parameters )
        for s_param, param in zip(self.shadow_params, parameters ):
            param.data.copy_(s_param.to(param.device ).data )

    def to(self, device=None, dtype=None):
        """Move/convert the shadow parameters (dtype conversion only for float tensors)."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype ) if p.is_floating_point() else p.to(device=device )
            for p in self.shadow_params
        ]

    def state_dict(self):
        """Return the EMA state (hyper-parameters + shadow parameters) as a dict."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]):
        """Temporarily stash a CPU copy of the given parameters (see `restore`)."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]):
        """Copy the parameters saved by `store` back into `parameters`, then drop the stash."""
        if self.temp_stored_params is None:
            raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
        for c_param, param in zip(self.temp_stored_params, parameters ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict):
        """Load EMA state produced by `state_dict`, validating every field."""
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get('''decay''', self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError('''Decay must be between 0 and 1''' )
        self.min_decay = state_dict.get('''min_decay''', self.min_decay )
        if not isinstance(self.min_decay, float ):
            raise ValueError('''Invalid min_decay''' )
        self.optimization_step = state_dict.get('''optimization_step''', self.optimization_step )
        if not isinstance(self.optimization_step, int ):
            raise ValueError('''Invalid optimization_step''' )
        self.update_after_step = state_dict.get('''update_after_step''', self.update_after_step )
        if not isinstance(self.update_after_step, int ):
            raise ValueError('''Invalid update_after_step''' )
        self.use_ema_warmup = state_dict.get('''use_ema_warmup''', self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup, bool ):
            raise ValueError('''Invalid use_ema_warmup''' )
        self.inv_gamma = state_dict.get('''inv_gamma''', self.inv_gamma )
        if not isinstance(self.inv_gamma, (float, int) ):
            raise ValueError('''Invalid inv_gamma''' )
        self.power = state_dict.get('''power''', self.power )
        if not isinstance(self.power, (float, int) ):
            raise ValueError('''Invalid power''' )
        shadow_params = state_dict.get('''shadow_params''', None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list ):
                raise ValueError('''shadow_params must be a list''' )
            if not all(isinstance(p, torch.Tensor ) for p in self.shadow_params ):
                raise ValueError('''shadow_params must all be Tensors''' )
| 83 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __snake_case :
    """Builds a tiny ASTConfig plus random inputs for the AST unit tests below.

    Fix: the original ``__init__`` declared every parameter as ``__lowerCAmelCase``
    (duplicate argument names are a SyntaxError) and read never-bound names; the
    methods were all named ``SCREAMING_SNAKE_CASE`` so later defs shadowed earlier
    ones even though the sibling test class calls them by their canonical names
    (``prepare_config_and_inputs_for_common`` etc.). Canonical names restored.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        patch_size=2,
        max_length=2_4,
        num_mel_bins=1_6,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, input_values, labels) with random spectrogram inputs."""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        """Build a small ASTConfig from the tester's hyper-parameters."""
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )

    def create_and_check_model(self, config, input_values, labels):
        """Run a forward pass and check the hidden-state output shape."""
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tester mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class __snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """Common model tests for AST.

    Fix: the original base list ``(_lowercase, _lowercase, unittest.TestCase)``
    repeats the same base class (TypeError at class creation) and ``_lowercase``
    is not defined — the mixins imported at the top of the file are restored.
    Duplicate ``snake_case__`` attributes and ``SCREAMING_SNAKE_CASE`` methods
    shadowed each other; canonical names are restored so the mixins read the
    attributes and unittest collects every test.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # The audio-classification pipeline test is skipped for AST here.
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        # NOTE(review): the tester class above is obfuscated as `__snake_case`;
        # `ASTModelTester` must resolve to it — confirm the module-level name.
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=3_7 )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear ) )

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names )

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def snake_case_ ( ):
    """Download a sample FLAC file from the Hub and return (waveform, sampling_rate).

    Fix: the unbound locals ``filepath``/``audio``/``sampling_rate`` are restored.
    """
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase):
    """Slow integration test for AST audio classification (requires downloads).

    Fix: the method bodies referenced never-bound names, and the cached property
    was named ``SCREAMING_SNAKE_CASE`` while the test reads
    ``self.default_feature_extractor``; canonical names are restored.
    """

    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(torch_device )
        feature_extractor = self.default_feature_extractor
        # NOTE(review): `prepare_audio` must resolve to the helper above
        # (obfuscated as `snake_case_`) — confirm the module-level name.
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 5_2_7) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
| 83 | 1 |
"""simple docstring"""
from __future__ import annotations
# Fix: every constant below was bound to the single name `lowerCAmelCase__`,
# so `abc`, the rotors and `reflector` used by the cipher functions were never
# defined. Canonical names restored.
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = '''EGZWVONAHDCLFQMSIPJBYUKXTR'''
rotor2 = '''FOBHMDKEXQNRAULPGSJVTYICZW'''
rotor3 = '''ZJXESIUQLHAVRMDOYGTNFWPBKC'''
# reflector --------------------------
reflector = {
    '''A''': '''N''',
    '''N''': '''A''',
    '''B''': '''O''',
    '''O''': '''B''',
    '''C''': '''P''',
    '''P''': '''C''',
    '''D''': '''Q''',
    '''Q''': '''D''',
    '''E''': '''R''',
    '''R''': '''E''',
    '''F''': '''S''',
    '''S''': '''F''',
    '''G''': '''T''',
    '''T''': '''G''',
    '''H''': '''U''',
    '''U''': '''H''',
    '''I''': '''V''',
    '''V''': '''I''',
    '''J''': '''W''',
    '''W''': '''J''',
    '''K''': '''X''',
    '''X''': '''K''',
    '''L''': '''Y''',
    '''Y''': '''L''',
    '''M''': '''Z''',
    '''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
rotor4 = '''RMDJXFUWGISLHVTCQNKYPBEZOA'''
rotor5 = '''SGLCPQWZHKXAREONTFBVIYJUDM'''
rotor6 = '''HVSICLTYKQUBXDWAJZOMFGPREN'''
rotor7 = '''RZWQHFMVDBKICJLNTUXAGYPSOE'''
rotor8 = '''LFKIJODBEGAMQPXVUHYSTCZRWN'''
rotor9 = '''KOAEGVDHXPQZMLFTYWJNBRCIUS'''
def snake_case_ ( rotpos : RotorPositionT, rotsel : RotorSelectionT, pb : str ):
    """Validate rotor selection/positions and the plugboard string.

    Returns ``(rotpos, rotsel, pbdict)`` where ``pbdict`` is the parsed plugboard.
    Fix: duplicate ``A_`` parameters (SyntaxError) and the unbound names
    ``rotorpos1``..``rotorpos3`` are restored; the first error message was
    missing its closing parenthesis.
    """
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel ) )) < 3:
        msg = F'''Please use 3 unique rotors (not {unique_rotsel})'''
        raise Exception(msg )
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc ):
        msg = F'''First rotor position is not within range of 1..26 ({rotorpos1})'''
        raise ValueError(msg )
    if not 0 < rotorpos2 <= len(abc ):
        msg = F'''Second rotor position is not within range of 1..26 ({rotorpos2})'''
        raise ValueError(msg )
    if not 0 < rotorpos3 <= len(abc ):
        msg = F'''Third rotor position is not within range of 1..26 ({rotorpos3})'''
        raise ValueError(msg )
    # Validates string and returns dict
    # NOTE(review): `_plugboard` must resolve to the plugboard parser below
    # (obfuscated as `snake_case_`) — confirm the module-level name.
    pbdict = _plugboard(pb )
    return rotpos, rotsel, pbdict
def snake_case_ ( pbstring : str ):
    """Validate a plugboard string and return it as a symmetric letter-pair dict.

    Fixes: the return value of ``str.replace`` was discarded (strings are
    immutable, so spaces were never stripped), and the dict was built into a
    never-bound name.
    """
    if not isinstance(pbstring, str ):
        msg = F'''Plugboard setting isn\'t type string ({type(pbstring )})'''
        raise TypeError(msg )
    elif len(pbstring ) % 2 != 0:
        msg = F'''Odd number of symbols ({len(pbstring )})'''
        raise Exception(msg )
    elif pbstring == "":
        return {}
    # bind the result — str.replace returns a new string
    pbstring = pbstring.replace(''' ''', '''''' )
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = F'''\'{i}\' not in list of symbols'''
            raise Exception(msg )
        elif i in tmppbl:
            msg = F'''Duplicate symbol ({i})'''
            raise Exception(msg )
        else:
            tmppbl.add(i )
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring ) - 1, 2 ):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def snake_case_ ( text : str, rotor_position : RotorPositionT, rotor_selection : RotorSelectionT = (rotor1, rotor2, rotor3), plugb : str = "", ):
    """Run ``text`` through an emulated Enigma machine.

    The operation is symmetric: feeding the output back with identical settings
    recovers the input. Fix: duplicate ``A_`` parameters (SyntaxError) and the
    collapsed/unbound ``rotorpos*``/``rotor*``/``symbol`` locals are restored.
    """
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper() )
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1, rotor2, rotor3 = rotor_selection
    # positions are 1-based for the user; internally 0-based
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor r1 --------------------------
            index = abc.index(symbol ) + rotorpos1
            symbol = rotor1[index % len(abc )]
            # rotor r2 --------------------------
            index = abc.index(symbol ) + rotorpos2
            symbol = rotor2[index % len(abc )]
            # rotor r3 --------------------------
            index = abc.index(symbol ) + rotorpos3
            symbol = rotor3[index % len(abc )]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor3.index(symbol ) - rotorpos3]
            symbol = abc[rotor2.index(symbol ) - rotorpos2]
            symbol = abc[rotor1.index(symbol ) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc ):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc ):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc ):
                rotorpos3 = 0
        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')
        result.append(symbol )
    return "".join(result )
if __name__ == "__main__":
    # Demo: encrypt a sample message and show that re-running decrypts it.
    # Fix: the original bound every value to `lowerCAmelCase__` and then read
    # never-defined names.
    message = '''This is my Python script that emulates the Enigma machine from WWII.'''
    rotor_pos = (1, 1, 1)
    pb = '''pictures'''
    rotor_sel = (rotor2, rotor4, rotor8)
    # NOTE(review): `enigma` must resolve to the cipher function above
    # (obfuscated as `snake_case_`) — confirm the module-level name.
    en = enigma(message, rotor_pos, rotor_sel, pb)
    print('''Encrypted message:''', en)
    print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
| 83 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def snake_case_ ( search_prob, find_max : bool = True, max_x : float = math.inf, min_x : float = -math.inf, max_y : float = math.inf, min_y : float = -math.inf, visualization : bool = False, start_temperate : float = 1_00, rate_of_decrease : float = 0.01, threshold_temp : float = 1, ):
    """Simulated annealing over a search-problem state.

    Starting from ``search_prob`` (a state exposing ``score()``, ``get_neighbors()``
    and ``x``/``y`` attributes), repeatedly move to a random neighbor — always if it
    improves the score, otherwise with probability e^(change/temperature) — while
    the temperature decays by ``rate_of_decrease`` per iteration, until it drops
    below ``threshold_temp`` or no neighbor is acceptable. Returns the best state
    seen. Fix: duplicate ``A_`` parameters (SyntaxError) and the unbound state
    variables (``search_end``, ``current_state``, ``best_state``, …) are restored.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations ), scores )
        plt.xlabel('''Iterations''' )
        plt.ylabel('''Function values''' )
        plt.show()
    return best_state
if __name__ == "__main__":
    # NOTE(review): the identifiers in this demo block look machine-mangled.
    # The calls reference ``test_fa``, ``prob``, ``simulated_annealing`` and
    # ``local_min``, but the definitions are named ``snake_case_`` and
    # ``lowerCAmelCase__``; both ``def snake_case_`` signatures also reuse the
    # parameter name ``A_`` (a SyntaxError) while their bodies read unbound
    # ``x``/``y``.  Preserved byte-for-byte; only comments/docstrings changed.
    def snake_case_ ( A_ : int, A_ : Tuple ):
        """Objective function f(x, y) = x^2 + y^2 (names mangled; see note above)."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    lowerCAmelCase__ = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    # starting the problem with initial coordinates (12, 47)
    lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    lowerCAmelCase__ = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    def snake_case_ ( A_ : Optional[int], A_ : List[Any] ):
        """Objective function f(x, y) = 3x^2 - 6y (names mangled; see note above)."""
        return (3 * x**2) - (6 * y)

    lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    lowerCAmelCase__ = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F"""{local_min.score()}"""
    )

    lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    lowerCAmelCase__ = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F"""{local_min.score()}"""
    )
| 83 | 1 |
"""Benchmark script: run a BERT-style ONNX model with onnxruntime and time it."""
import os
import time
import numpy as np
import onnxruntime as ort
# NOTE(review): every assignment below targets the same mangled name
# ``lowerCAmelCase__`` while later lines read the real names
# (``sess_opt``, ``sess``, ``run_opt``, ``batch``, ``sequence``, ``input_ids``,
# ``attention_mask``, ``token_type_ids``, ``max_iters``, ``start_time``), so
# this script cannot run as written.  ``np.intaa`` is also not a NumPy dtype —
# presumably ``np.int64`` originally (TODO confirm).  The first three string
# constants were presumably ``os.environ[...]`` settings before mangling.
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = ort.SessionOptions()
# Disable all graph optimizations so the raw model is benchmarked.
lowerCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowerCAmelCase__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ = ort.RunOptions()
# Fixed-shape dummy inputs: batch of 1, sequence length 128, all-ones tensors.
lowerCAmelCase__ = 128
lowerCAmelCase__ = 1
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
# One untimed run so lazy initialization does not skew the measurement.
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = 2000
lowerCAmelCase__ = {}
# ``iter`` shadows the builtin; loop index is unused — timing only.
for iter in range(max_iters):
    lowerCAmelCase__ = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
# Average wall-clock latency per run, in milliseconds.
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 |
"""Volume unit conversion via cubic metres as the pivot unit."""
from collections import namedtuple

# ``from_``: factor converting one unit INTO cubic metres;
# ``to``: factor converting one cubic metre INTO the unit.
# Fix: the original assigned both the namedtuple type and the table to the
# mangled name ``lowerCAmelCase__`` while the code below reads ``from_to`` and
# ``METRIC_CONVERSION``, so the module could not even be imported.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def snake_case_(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` from ``from_type`` to ``to_type``.

    Fix: the original declared three duplicate ``A_`` parameters (SyntaxError)
    and joined the wrong object in the error messages; the supported-unit list
    is now joined from ``METRIC_CONVERSION``'s keys.

    Raises:
        ValueError: if either unit name is not in ``METRIC_CONVERSION``.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    # Convert to cubic metres, then from cubic metres to the target unit.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 83 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __snake_case ( _lowercase , unittest.TestCase):
    # Fast (CPU) pipeline tests for ``VideoToVideoSDPipeline``.
    # NOTE(review): identifiers look machine-mangled — every test method is
    # named ``SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones), some
    # signatures reuse the parameter name ``__lowerCAmelCase`` (a SyntaxError),
    # and ``_lowercase`` is not defined in this file (presumably the shared
    # PipelineTesterMixin base originally — TODO confirm).
    snake_case__ : List[str] = VideoToVideoSDPipeline
    # The pipeline takes a "video" instead of an "image"; image-only params dropped.
    snake_case__ : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    snake_case__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    snake_case__ : Tuple = PipelineTesterMixin.required_optional_params - {"latents"}
    snake_case__ : str = False
    # No `output_type`.
    snake_case__ : int = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ])

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Build tiny UNet/scheduler/VAE/CLIP components for fast tests."""
        torch.manual_seed(0 )
        _lowerCamelCase : Any = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=3_2 , attention_head_dim=4 , )
        _lowerCamelCase : str = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
        torch.manual_seed(0 )
        _lowerCamelCase : Optional[int] = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
        torch.manual_seed(0 )
        _lowerCamelCase : List[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
        _lowerCamelCase : List[str] = CLIPTextModel(__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        _lowerCamelCase : int = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components

    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any=0 ):
        """Build deterministic dummy pipeline inputs (tiny 3-frame video + prompt)."""
        _lowerCamelCase : str = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
        if str(__lowerCAmelCase ).startswith('''mps''' ):
            # MPS does not support device-local generators.
            _lowerCamelCase : List[Any] = torch.manual_seed(__lowerCAmelCase )
        else:
            _lowerCamelCase : Tuple = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''video''': video,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """End-to-end smoke test: output shape and a pinned pixel slice on CPU."""
        _lowerCamelCase : int = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        _lowerCamelCase : Dict = self.get_dummy_components()
        _lowerCamelCase : int = VideoToVideoSDPipeline(**__lowerCAmelCase )
        _lowerCamelCase : str = sd_pipe.to(__lowerCAmelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : Dict = self.get_dummy_inputs(__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = '''np'''
        _lowerCamelCase : Tuple = sd_pipe(**__lowerCAmelCase ).frames
        _lowerCamelCase : Dict = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (3_2, 3_2, 3)
        _lowerCamelCase : Dict = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Check xformers attention matches the default attention path."""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCAmelCase , expected_max_diff=5E-3 )

    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Skipped: batched inference unsupported."""
        pass

    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Skipped: batched inference unsupported."""
        pass

    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Skipped: num_images_per_prompt unsupported."""
        pass

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Delegate progress-bar behavior check to the mixin implementation."""
        return super().test_progress_bar()
@slow
@skip_mps
class __snake_case ( unittest.TestCase):
    # Slow GPU integration test for ``VideoToVideoSDPipeline`` against the
    # real ``cerspense/zeroscope_v2_XL`` checkpoint.  NOTE(review): parameter
    # names are machine-mangled (``__lowerCAmelCase`` both generated and read).
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Run a 3-step fp16 video-to-video pass and pin a slice of the output."""
        _lowerCamelCase : Any = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.floataa )
        pipe.enable_model_cpu_offload()
        # 10 frames
        _lowerCamelCase : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
        _lowerCamelCase : Optional[int] = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=__lowerCAmelCase )
        _lowerCamelCase : List[Any] = video.to('''cuda''' )
        _lowerCamelCase : Tuple = '''Spiderman is surfing'''
        _lowerCamelCase : Any = pipe(__lowerCAmelCase , video=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=3 , output_type='''pt''' ).frames
        _lowerCamelCase : Dict = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 83 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( _lowercase):
    # Helper that builds tiny DeBERTa-v2 configs/inputs and exercises each
    # task head.  NOTE(review): identifiers are machine-mangled — all methods
    # share the name ``SCREAMING_SNAKE_CASE`` (later defs shadow earlier ones)
    # and several signatures reuse ``__lowerCAmelCase`` (a SyntaxError), so
    # this block cannot run as written.  Preserved byte-for-byte; only
    # comments/docstrings changed.
    def __init__( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : int=1_3 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=9_9 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Union[str, Any]=5 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Optional[int]=3_7 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : int=5_1_2 , __lowerCAmelCase : Tuple=1_6 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any="None" , __lowerCAmelCase : str=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Optional[Any]=None , ):
        """Store the hyper-parameters for the tiny DeBERTa-v2 test model."""
        _lowerCamelCase : Dict = parent
        _lowerCamelCase : Union[str, Any] = batch_size
        _lowerCamelCase : Optional[Any] = seq_length
        _lowerCamelCase : Optional[Any] = is_training
        _lowerCamelCase : Dict = use_input_mask
        _lowerCamelCase : Tuple = use_token_type_ids
        _lowerCamelCase : Optional[Any] = use_labels
        _lowerCamelCase : List[str] = vocab_size
        _lowerCamelCase : Any = hidden_size
        _lowerCamelCase : int = num_hidden_layers
        _lowerCamelCase : Optional[Any] = num_attention_heads
        _lowerCamelCase : int = intermediate_size
        _lowerCamelCase : Optional[int] = hidden_act
        _lowerCamelCase : int = hidden_dropout_prob
        _lowerCamelCase : Dict = attention_probs_dropout_prob
        _lowerCamelCase : List[Any] = max_position_embeddings
        _lowerCamelCase : str = type_vocab_size
        _lowerCamelCase : List[Any] = type_sequence_label_size
        _lowerCamelCase : List[Any] = initializer_range
        _lowerCamelCase : Optional[int] = num_labels
        _lowerCamelCase : Any = num_choices
        _lowerCamelCase : int = relative_attention
        _lowerCamelCase : Union[str, Any] = position_biased_input
        _lowerCamelCase : str = pos_att_type
        _lowerCamelCase : Tuple = scope

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Build random input ids/masks/labels plus a config for the tests."""
        _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowerCamelCase : List[Any] = None
        if self.use_input_mask:
            _lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        _lowerCamelCase : Any = None
        if self.use_token_type_ids:
            _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowerCamelCase : Any = None
        _lowerCamelCase : int = None
        _lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            _lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        _lowerCamelCase : int = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Return a DebertaVaConfig built from the stored hyper-parameters."""
        return DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )

    def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str ):
        """Assert the loss is a scalar tensor."""
        self.parent.assertListEqual(list(result.loss.size() ) , [] )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ):
        """Run the bare model with/without masks and check the output shape."""
        _lowerCamelCase : List[str] = DebertaVaModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
        _lowerCamelCase : str = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )[0]
        _lowerCamelCase : List[Any] = model(__lowerCAmelCase )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple ):
        """Check the masked-LM head output shape."""
        _lowerCamelCase : Tuple = DebertaVaForMaskedLM(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
        """Check the sequence-classification head logits and scalar loss."""
        _lowerCamelCase : Optional[Any] = self.num_labels
        _lowerCamelCase : Dict = DebertaVaForSequenceClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ):
        """Check the token-classification head output shape."""
        _lowerCamelCase : Optional[int] = self.num_labels
        _lowerCamelCase : Tuple = DebertaVaForTokenClassification(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
        """Check the question-answering head start/end logits shapes."""
        _lowerCamelCase : List[str] = DebertaVaForQuestionAnswering(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Tuple = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
        """Check the multiple-choice head: inputs are tiled per choice."""
        _lowerCamelCase : Optional[int] = DebertaVaForMultipleChoice(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        # Expand each (batch, seq) tensor to (batch, num_choices, seq).
        _lowerCamelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowerCamelCase : List[Any] = model(
            __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)."""
        _lowerCamelCase : Any = self.prepare_config_and_inputs()
        # NOTE(review): annotated assignment to a tuple target is a SyntaxError;
        # another artifact of the mangling.  Preserved as-is.
        (
            (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) , (
                _lowerCamelCase
            ) ,
        ) : Union[str, Any] = config_and_inputs
        _lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    # Common-suite test class for DeBERTa-v2 models.  NOTE(review): the two
    # mangled ``_lowercase`` bases are presumably ModelTesterMixin and
    # PipelineTesterMixin (imported above) — TODO confirm.  All test methods
    # share the mangled name ``SCREAMING_SNAKE_CASE``.
    snake_case__ : int = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ : int = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    snake_case__ : List[str] = True
    snake_case__ : List[Any] = False
    snake_case__ : int = False
    snake_case__ : Optional[Any] = False
    snake_case__ : str = False

    def SCREAMING_SNAKE_CASE ( self : int ):
        """Create the model tester and a config tester for the suite."""
        _lowerCamelCase : List[str] = DebertaVaModelTester(self )
        _lowerCamelCase : Any = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )

    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self : int ):
        """Bare model forward-pass shape check."""
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Sequence-classification head check."""
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Masked-LM head check."""
        _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : str ):
        """Question-answering head check."""
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : int ):
        """Token-classification head check."""
        _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*__lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Multiple-choice head check."""
        _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*__lowerCAmelCase )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Smoke-test loading the first published checkpoint."""
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Tuple = DebertaVaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase):
    # Slow integration tests against the real microsoft/deberta-v2-xlarge
    # checkpoint (mangled identifiers, as elsewhere in this file).
    @unittest.skip(reason='''Model not available yet''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Placeholder: skipped until the model is published."""
        pass

    @slow
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Forward a fixed input through deberta-v2-xlarge and pin an output slice."""
        _lowerCamelCase : Tuple = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
        _lowerCamelCase : List[str] = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        _lowerCamelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            _lowerCamelCase : Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0]
        # compare the actual values for a slice.
        _lowerCamelCase : Union[str, Any] = torch.tensor(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 83 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class __snake_case ( _lowercase):
    # Configuration class for the Conditional DETR object-detection model
    # (mangled name; presumably ``ConditionalDetrConfig`` originally).
    # NOTE(review): ``__init__`` reuses the parameter name ``__lowerCAmelCase``
    # many times (a SyntaxError) while the body reads the real option names —
    # another mangling artifact.  Preserved byte-for-byte; comments only.
    snake_case__ : List[Any] = "conditional_detr"
    snake_case__ : str = ["past_key_values"]
    # Map common config attribute names onto this model's field names.
    snake_case__ : Optional[int] = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self : List[Any] , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[Any]=3_0_0 , __lowerCAmelCase : Any=6 , __lowerCAmelCase : str=2_0_4_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=6 , __lowerCAmelCase : str=2_0_4_8 , __lowerCAmelCase : Optional[int]=8 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : str="relu" , __lowerCAmelCase : Any=2_5_6 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : Any=1.0 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]="sine" , __lowerCAmelCase : str="resnet50" , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : List[Any]=5 , __lowerCAmelCase : int=2 , __lowerCAmelCase : Tuple=1 , __lowerCAmelCase : Any=1 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : int=0.25 , **__lowerCAmelCase : Optional[Any] , ):
        """Validate the backbone choice and store all model hyper-parameters."""
        # A timm backbone and an explicit backbone_config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                _lowerCamelCase : List[Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                # backbone_config given as a plain dict: resolve its model_type.
                _lowerCamelCase : Union[str, Any] = backbone_config.get('''model_type''' )
                _lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
                _lowerCamelCase : str = config_class.from_dict(__lowerCAmelCase )
        _lowerCamelCase : List[str] = use_timm_backbone
        _lowerCamelCase : List[Any] = backbone_config
        _lowerCamelCase : List[Any] = num_channels
        _lowerCamelCase : Dict = num_queries
        _lowerCamelCase : Any = d_model
        _lowerCamelCase : List[str] = encoder_ffn_dim
        _lowerCamelCase : Optional[Any] = encoder_layers
        _lowerCamelCase : Optional[int] = encoder_attention_heads
        _lowerCamelCase : List[str] = decoder_ffn_dim
        _lowerCamelCase : Optional[Any] = decoder_layers
        _lowerCamelCase : Tuple = decoder_attention_heads
        _lowerCamelCase : Dict = dropout
        _lowerCamelCase : Dict = attention_dropout
        _lowerCamelCase : List[Any] = activation_dropout
        _lowerCamelCase : Optional[int] = activation_function
        _lowerCamelCase : Tuple = init_std
        _lowerCamelCase : List[str] = init_xavier_std
        _lowerCamelCase : Optional[Any] = encoder_layerdrop
        _lowerCamelCase : Tuple = decoder_layerdrop
        _lowerCamelCase : Tuple = encoder_layers
        _lowerCamelCase : str = auxiliary_loss
        _lowerCamelCase : int = position_embedding_type
        _lowerCamelCase : int = backbone
        _lowerCamelCase : int = use_pretrained_backbone
        _lowerCamelCase : Optional[Any] = dilation
        # Hungarian matcher
        _lowerCamelCase : str = class_cost
        _lowerCamelCase : Dict = bbox_cost
        _lowerCamelCase : List[Any] = giou_cost
        # Loss coefficients
        _lowerCamelCase : Union[str, Any] = mask_loss_coefficient
        _lowerCamelCase : Tuple = dice_loss_coefficient
        _lowerCamelCase : str = cls_loss_coefficient
        _lowerCamelCase : Union[str, Any] = bbox_loss_coefficient
        _lowerCamelCase : int = giou_loss_coefficient
        _lowerCamelCase : int = focal_alpha
        super().__init__(is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase )

    @property
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Alias: number of attention heads (encoder side)."""
        return self.encoder_attention_heads

    @property
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Alias: hidden size (d_model)."""
        return self.d_model

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Serialize to a dict, expanding the nested backbone config."""
        _lowerCamelCase : str = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            _lowerCamelCase : int = self.backbone_config.to_dict()
        _lowerCamelCase : List[str] = self.__class__.model_type
        return output
class __snake_case ( _lowercase):
    # ONNX export configuration for Conditional DETR (mangled name; base is
    # presumably the imported ``OnnxConfig``).
    snake_case__ : Tuple = version.parse("1.11")

    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Dynamic-axis spec for the two model inputs."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )

    @property
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Absolute tolerance used when validating the exported model."""
        return 1E-5

    @property
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Default ONNX opset to export with."""
        return 1_2
| 83 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCAmelCase__ = '''hf-internal-testing/tiny-random-bert'''
lowerCAmelCase__ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
lowerCAmelCase__ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class __snake_case ( unittest.TestCase):
    """Integration tests for the hub caching utilities (``cached_file``,
    ``has_file``, ``get_file_from_repo``).

    NOTE(review): identifiers here look machine-mangled — every local is
    ``_lowerCamelCase`` and most call arguments are ``__lowerCAmelCase``,
    a name never bound in this scope, so these tests cannot run as written.
    All methods also share the name ``SCREAMING_SNAKE_CASE``, so each
    definition shadows the previous one. Confirm against upstream.
    """
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Download a file with ``cached_file`` and check the cache layout
        (blobs/refs/snapshots), cache reuse on a second call, and pinning a
        short revision."""
        _lowerCamelCase : Optional[Any] = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(__lowerCAmelCase ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : Optional[int] = f.read()
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
        self.assertTrue(os.path.isfile(__lowerCAmelCase ) )
        # File is cached at the same place the second time.
        _lowerCamelCase : Tuple = cached_file(__lowerCAmelCase , __lowerCAmelCase )
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        # Using a specific revision to test the full commit hash.
        _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''9b8c223''' )
        self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Check the error messages raised by ``cached_file`` for a bad repo
        id, a bad revision, and a missing filename."""
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            _lowerCamelCase : Optional[int] = cached_file('''tiny-random-bert''' , __lowerCAmelCase )
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            _lowerCamelCase : str = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''aaaa''' )
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : int = cached_file(__lowerCAmelCase , '''conf''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Check non-raising paths of ``cached_file``: the ``.no_exist`` cache
        marker, ``_raise_exceptions_for_missing_entries``, ``local_files_only``,
        and a mocked HTTP 500 on the HEAD request."""
        with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
            _lowerCamelCase : Dict = cached_file(__lowerCAmelCase , '''conf''' )
        with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
            _lowerCamelCase : List[Any] = f.read()
        self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''.no_exist''' , __lowerCAmelCase , '''conf''' ) ) )
        _lowerCamelCase : str = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = cached_file(__lowerCAmelCase , '''conf''' , local_files_only=__lowerCAmelCase , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
        self.assertIsNone(__lowerCAmelCase )
        # Build a fake response object returning a 500 status.
        _lowerCamelCase : Any = mock.Mock()
        _lowerCamelCase : Optional[Any] = 5_0_0
        _lowerCamelCase : Dict = {}
        _lowerCamelCase : List[Any] = HTTPError
        _lowerCamelCase : int = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=__lowerCAmelCase ) as mock_head:
            _lowerCamelCase : Union[str, Any] = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=__lowerCAmelCase )
            self.assertIsNone(__lowerCAmelCase )
        # This check we did call the fake head request
        mock_head.assert_called()
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Check ``has_file`` against a repo that only contains a PyTorch
        weight file (True for the PT weight, False for the others)."""
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Check ``get_file_from_repo``: None for a missing file, errors for a
        bad repo/revision, and content verification for an existing config."""
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , __lowerCAmelCase )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase , revision='''ahaha''' )
        _lowerCamelCase : Dict = get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        _lowerCamelCase : Dict = json.loads(open(__lowerCAmelCase , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 7_6_8 )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Check ``get_file_from_repo`` on a local directory: returns the path
        for an existing file and None for a missing one."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            _lowerCamelCase : Any = Path(__lowerCAmelCase ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(__lowerCAmelCase , '''a.txt''' ) , str(__lowerCAmelCase ) )
            self.assertIsNone(get_file_from_repo(__lowerCAmelCase , '''b.txt''' ) )
| 83 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def snake_case_ ( A_ : str ):
    """Load a fairseq/metaseq OPT checkpoint from the file path ``A_`` and
    normalise its state dict to the HF ``OPTModel`` layout.

    Steps: unwrap the inner ``"model"`` dict if present, drop weights with no
    HF counterpart, rename projection / layer-norm keys, and split every fused
    ``qkv_proj`` weight into separate q/k/v projection weights.

    Returns the transformed state dict.
    """
    _lowerCamelCase : dict = torch.load(A_, map_location='''cpu''' )
    if "model" in _lowerCamelCase.keys():
        # Bug fix: the original re-loaded the entire checkpoint from disk a
        # second time just to index ["model"]; unwrap the loaded dict instead.
        _lowerCamelCase = _lowerCamelCase['''model''']
    sd = _lowerCamelCase
    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            # Bug fix: the original called ``sd.pop(A_)`` — popping the
            # checkpoint *path* instead of the key.
            sd.pop(key )
    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            # Bug fix: the original assigned the popped tensor to a throwaway
            # local instead of storing it back under the new key.
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''', '''.q_proj.''' )
            k_name = key.replace('''.qkv_proj.''', '''.k_proj.''' )
            v_name = key.replace('''.qkv_proj.''', '''.v_proj.''' )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            q, k, v = torch.split(value, depth // 3, dim=0 )
            # Bug fix: the originals were assigned to throwaway locals and the
            # split weights were silently lost; write them into the dict.
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def snake_case_ ( A_ : Dict, A_ : Optional[int], A_ : Union[str, Any]=None ):
    """Convert a metaseq OPT checkpoint into a HF ``OPTModel`` and save it.

    NOTE(review): this definition is machine-mangled — all three parameters
    share the name ``A_`` (a SyntaxError in Python) and the body reads
    ``config`` / ``model``, which the mangled assignments never bind.
    Upstream signature is ``(checkpoint_path, pytorch_dump_folder_path,
    config=None)``; confirm before relying on this code.
    """
    _lowerCamelCase : Dict = load_checkpoint(A_ )
    if config is not None:
        _lowerCamelCase : List[Any] = OPTConfig.from_pretrained(A_ )
    else:
        _lowerCamelCase : Union[str, Any] = OPTConfig()
    # Build the model in fp16 and eval mode before loading the state dict.
    _lowerCamelCase : Optional[Any] = OPTModel(A_ ).half().eval()
    model.load_state_dict(A_ )
    # Check results
    Path(A_ ).mkdir(exist_ok=A_ )
    model.save_pretrained(A_ )
# CLI entry point: parse the checkpoint path / output folder / optional HF
# config and run the conversion.
# NOTE(review): mangled — `parser`, `args` and `convert_opt_checkpoint` are
# never bound under these names above; confirm against upstream.
if __name__ == "__main__":
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--fairseq_path''',
        type=str,
        help=(
            '''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
            ''' https://huggingface.co/models?other=opt_metasq'''
        ),
    )
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    lowerCAmelCase__ = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 83 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __snake_case ( _lowercase):
    """Configuration for the CvT (Convolutional vision Transformer) model
    (upstream: ``CvtConfig``); per-stage lists configure patch embedding,
    attention and MLP hyper-parameters for the three stages.

    NOTE(review): mangled — every ``__init__`` parameter shares the name
    ``__lowerCAmelCase`` (a SyntaxError), and the body assigns locals named
    ``_lowerCamelCase`` instead of ``self.<attr>``; confirm upstream.
    """
    # model_type identifier used by the auto-config machinery.
    snake_case__ : List[str] = "cvt"
    def __init__( self : Any , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : List[str]=[7, 3, 3] , __lowerCAmelCase : int=[4, 2, 2] , __lowerCAmelCase : int=[2, 1, 1] , __lowerCAmelCase : str=[6_4, 1_9_2, 3_8_4] , __lowerCAmelCase : Dict=[1, 3, 6] , __lowerCAmelCase : Optional[Any]=[1, 2, 1_0] , __lowerCAmelCase : Dict=[4.0, 4.0, 4.0] , __lowerCAmelCase : Dict=[0.0, 0.0, 0.0] , __lowerCAmelCase : Union[str, Any]=[0.0, 0.0, 0.0] , __lowerCAmelCase : int=[0.0, 0.0, 0.1] , __lowerCAmelCase : Union[str, Any]=[True, True, True] , __lowerCAmelCase : str=[False, False, True] , __lowerCAmelCase : List[str]=["dw_bn", "dw_bn", "dw_bn"] , __lowerCAmelCase : List[Any]=[3, 3, 3] , __lowerCAmelCase : Dict=[1, 1, 1] , __lowerCAmelCase : str=[2, 2, 2] , __lowerCAmelCase : Optional[Any]=[1, 1, 1] , __lowerCAmelCase : Optional[Any]=[1, 1, 1] , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : Any=1E-12 , **__lowerCAmelCase : int , ):
        """Forward extra kwargs to the base config and record the per-stage
        hyper-parameters (channels, patch sizes/strides/padding, embed dims,
        heads, depths, MLP ratios, dropout rates, attention projection
        settings, initializer range, layer-norm epsilon)."""
        super().__init__(**__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = num_channels
        _lowerCamelCase : int = patch_sizes
        _lowerCamelCase : Optional[Any] = patch_stride
        _lowerCamelCase : str = patch_padding
        _lowerCamelCase : Any = embed_dim
        _lowerCamelCase : Optional[Any] = num_heads
        _lowerCamelCase : Dict = depth
        _lowerCamelCase : Optional[int] = mlp_ratio
        _lowerCamelCase : Any = attention_drop_rate
        _lowerCamelCase : Any = drop_rate
        _lowerCamelCase : Dict = drop_path_rate
        _lowerCamelCase : Optional[int] = qkv_bias
        _lowerCamelCase : int = cls_token
        _lowerCamelCase : int = qkv_projection_method
        _lowerCamelCase : Optional[Any] = kernel_qkv
        _lowerCamelCase : List[str] = padding_kv
        _lowerCamelCase : Tuple = stride_kv
        _lowerCamelCase : Union[str, Any] = padding_q
        _lowerCamelCase : Optional[Any] = stride_q
        _lowerCamelCase : Tuple = initializer_range
        _lowerCamelCase : Optional[int] = layer_norm_eps
| 83 | 1 |
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class __snake_case ( _lowercase):
    """Abstract base class for generation stopping criteria
    (upstream: ``StoppingCriteria``)."""
    @add_start_docstrings(__lowerCAmelCase )
    def __call__( self : List[str] , __lowerCAmelCase : torch.LongTensor , __lowerCAmelCase : torch.FloatTensor , **__lowerCAmelCase : int ):
        """Subclasses must implement the actual stopping check."""
        raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
class __snake_case ( _lowercase):
    """Stop generation once the sequence reaches ``max_length`` tokens
    (upstream: ``MaxLengthCriteria``)."""
    def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] = None ):
        """Record the maximum length and, optionally, the model's
        ``max_position_embeddings`` used for the warning in ``__call__``.

        NOTE(review): mangled — the assignments bind locals rather than
        ``self.max_length`` / ``self.max_position_embeddings``, which
        ``__call__`` reads; confirm against upstream.
        """
        _lowerCamelCase : Dict = max_length
        _lowerCamelCase : int = max_position_embeddings
    @add_start_docstrings(__lowerCAmelCase )
    def __call__( self : Tuple , __lowerCAmelCase : torch.LongTensor , __lowerCAmelCase : torch.FloatTensor , **__lowerCAmelCase : List[Any] ):
        """Return True when the current length reaches ``max_length``; warn
        once if the length exceeds the position-embedding capacity."""
        _lowerCamelCase : Union[str, Any] = input_ids.shape[-1]
        _lowerCamelCase : Union[str, Any] = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                '''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
                f'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                '''exceptions, performance degradation, or nothing at all.''' )
        return is_done
class __snake_case ( _lowercase):
    """Deprecated criterion stopping after ``max_new_tokens`` newly generated
    tokens (upstream: ``MaxNewTokensCriteria``); superseded by a plain
    max-length criterion."""
    def __init__( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
        """Emit the deprecation warning and record start length, new-token
        budget, and the resulting absolute ``max_length``."""
        warnings.warn(
            '''The class `MaxNewTokensCriteria` is deprecated. '''
            f'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            '''with `max_length = start_length + max_new_tokens` instead.''' , __lowerCAmelCase , )
        _lowerCamelCase : Dict = start_length
        _lowerCamelCase : str = max_new_tokens
        _lowerCamelCase : str = start_length + max_new_tokens
    @add_start_docstrings(__lowerCAmelCase )
    def __call__( self : str , __lowerCAmelCase : torch.LongTensor , __lowerCAmelCase : torch.FloatTensor , **__lowerCAmelCase : Any ):
        """Return True once the sequence length reaches the absolute cap.

        NOTE(review): reads ``self.max_length``, which the mangled ``__init__``
        above never sets — confirm against upstream."""
        return input_ids.shape[-1] >= self.max_length
class __snake_case ( _lowercase):
    """Stop generation after a wall-clock budget ``max_time`` seconds
    (upstream: ``MaxTimeCriteria``)."""
    def __init__( self : Union[str, Any] , __lowerCAmelCase : float , __lowerCAmelCase : Optional[float] = None ):
        """Record the time budget and the reference timestamp (defaults to
        now). NOTE(review): mangled — values are bound to locals, not to the
        ``self.max_time`` / ``self.initial_timestamp`` read in ``__call__``."""
        _lowerCamelCase : Tuple = max_time
        _lowerCamelCase : Any = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(__lowerCAmelCase )
    def __call__( self : Union[str, Any] , __lowerCAmelCase : torch.LongTensor , __lowerCAmelCase : torch.FloatTensor , **__lowerCAmelCase : Dict ):
        """Return True when the elapsed time exceeds the budget."""
        return time.time() - self.initial_timestamp > self.max_time
class __snake_case ( _lowercase):
    """A list of stopping criteria; generation stops when any member fires
    (upstream: ``StoppingCriteriaList``)."""
    @add_start_docstrings(__lowerCAmelCase )
    def __call__( self : List[str] , __lowerCAmelCase : torch.LongTensor , __lowerCAmelCase : torch.FloatTensor , **__lowerCAmelCase : List[str] ):
        """Return True if any contained criterion returns True."""
        return any(criteria(__lowerCAmelCase , __lowerCAmelCase ) for criteria in self )
    @property
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Return the ``max_length`` of the first length-based criterion in
        the list, or None.

        NOTE(review): mangled — both ``isinstance`` branches test the same
        unbound name and have identical bodies; upstream distinguishes
        ``MaxLengthCriteria`` from ``MaxNewTokensCriteria``. Confirm.
        """
        for stopping_criterium in self:
            if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                return stopping_criterium.max_length
            elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
                return stopping_criterium.max_length
        return None
def snake_case_ ( A_ : StoppingCriteriaList, A_ : int ):
    """Validate a stopping-criteria list against a ``max_length`` argument:
    warn if they disagree, append a max-length criterion if none is set, and
    return a deep copy (upstream: ``validate_stopping_criteria``).

    NOTE(review): mangled — both parameters are named ``A_`` (a SyntaxError)
    and ``new_stopping_criteria`` / ``stopping_max_length`` / ``max_length``
    are never bound under these names; confirm against upstream.
    """
    _lowerCamelCase : Tuple = stopping_criteria.max_length
    _lowerCamelCase : str = deepcopy(A_ )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''', A_ )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=A_ ) )
    return new_stopping_criteria
| 83 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __snake_case ( _lowercase):
    """Output of the SDE-VE scheduler step (upstream: ``SdeVeOutput``),
    carrying the previous-timestep sample and its mean.

    NOTE(review): mangled — both fields share the name ``snake_case__`` so
    the second annotation just re-annotates the first; upstream they are
    ``prev_sample`` and ``prev_sample_mean``. Confirm.
    """
    snake_case__ : torch.FloatTensor
    snake_case__ : torch.FloatTensor
class __snake_case ( _lowercase , _lowercase):
    """Variance-exploding SDE scheduler (upstream: ``ScoreSdeVeScheduler``),
    implementing predictor (``step``-style) and corrector updates from
    score-based generative modeling via SDEs.

    NOTE(review): mangled source — parameters all named ``__lowerCAmelCase``
    and attribute writes turned into ``_lowerCamelCase`` locals, so methods
    read attributes (``self.timesteps``, ``self.discrete_sigmas``) that the
    visible code never sets. Comments describe apparent intent; confirm
    against upstream ``diffusers``.
    """
    # Solver order (upstream: `order = 1`).
    snake_case__ : int = 1
    @register_to_config
    def __init__( self : str , __lowerCAmelCase : int = 2_0_0_0 , __lowerCAmelCase : float = 0.15 , __lowerCAmelCase : float = 0.01 , __lowerCAmelCase : float = 13_48.0 , __lowerCAmelCase : float = 1E-5 , __lowerCAmelCase : int = 1 , ):
        """Register config (num_train_timesteps, snr, sigma_min/max,
        sampling_eps, correct_steps) and precompute the sigma schedule."""
        _lowerCamelCase : Optional[int] = sigma_max
        # setable values
        _lowerCamelCase : Dict = None
        self.set_sigmas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ):
        """Upstream ``scale_model_input``: this scheduler applies no input
        scaling, so the sample is returned unchanged. NOTE(review): returns
        the name ``sample``, which the mangled signature never binds."""
        return sample
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : Union[str, torch.device] = None ):
        """Upstream ``set_timesteps``: build a linearly spaced continuous
        timestep grid from 1 down to ``sampling_eps``."""
        _lowerCamelCase : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        _lowerCamelCase : Optional[int] = torch.linspace(1 , __lowerCAmelCase , __lowerCAmelCase , device=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None ):
        """Upstream ``set_sigmas``: compute the geometric sigma schedule
        (both continuous, via log-space linspace, and per-timestep discrete)."""
        _lowerCamelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
        _lowerCamelCase : int = sigma_max if sigma_max is not None else self.config.sigma_max
        _lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        _lowerCamelCase : Optional[int] = torch.exp(torch.linspace(math.log(__lowerCAmelCase ) , math.log(__lowerCAmelCase ) , __lowerCAmelCase ) )
        _lowerCamelCase : Tuple = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ):
        """Upstream ``get_adjacent_sigma``: sigma of the previous discrete
        timestep (zeros at timestep 0)."""
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
        """Upstream ``step_pred``: one reverse-SDE predictor step. Computes
        drift/diffusion from adjacent sigmas, subtracts the drift and adds the
        diffusion-scaled noise; returns (prev_sample, prev_sample_mean) or an
        ``SdeVeOutput``."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        _lowerCamelCase : Tuple = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        _lowerCamelCase : Dict = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        _lowerCamelCase : Optional[int] = timesteps.to(self.discrete_sigmas.device )
        _lowerCamelCase : Any = self.discrete_sigmas[timesteps].to(sample.device )
        _lowerCamelCase : int = self.get_adjacent_sigma(__lowerCAmelCase , __lowerCAmelCase ).to(sample.device )
        _lowerCamelCase : Any = torch.zeros_like(__lowerCAmelCase )
        _lowerCamelCase : Any = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        _lowerCamelCase : Union[str, Any] = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            _lowerCamelCase : List[Any] = diffusion.unsqueeze(-1 )
        _lowerCamelCase : int = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        _lowerCamelCase : List[str] = randn_tensor(
            sample.shape , layout=sample.layout , generator=__lowerCAmelCase , device=sample.device , dtype=sample.dtype )
        _lowerCamelCase : List[Any] = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        _lowerCamelCase : int = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=__lowerCAmelCase , prev_sample_mean=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
        """Upstream ``step_correct``: one Langevin corrector step with a step
        size derived from the configured SNR and the noise/score norms."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        _lowerCamelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=__lowerCAmelCase ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        _lowerCamelCase : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        _lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        _lowerCamelCase : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        _lowerCamelCase : Tuple = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        _lowerCamelCase : Union[str, Any] = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            _lowerCamelCase : str = step_size.unsqueeze(-1 )
        _lowerCamelCase : Any = sample + step_size * model_output
        _lowerCamelCase : int = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , ):
        """Upstream ``add_noise``: add sigma-scaled noise (per timestep) to the
        original samples for training."""
        _lowerCamelCase : Dict = timesteps.to(original_samples.device )
        _lowerCamelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
        _lowerCamelCase : Union[str, Any] = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        _lowerCamelCase : int = noise + original_samples
        return noisy_samples
    def __len__( self : Optional[int] ):
        """Length of the scheduler = number of training timesteps."""
        return self.config.num_train_timesteps
| 83 | 1 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowerCAmelCase__ = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def snake_case_ ( A_ : Optional[int], A_ : Any, A_ : int, A_ : int ):
    """Decide whether a configuration attribute is actually used by the
    model's modeling files, or is otherwise allowed (upstream:
    ``check_attribute_being_used``).

    NOTE(review): mangled — all four parameters share the name ``A_`` (a
    SyntaxError; upstream: ``config_class, attributes, default_value,
    source_strings``) and flags like ``attribute_used`` / ``case_allowed``
    are read but bound only to ``_lowerCamelCase`` locals. Confirm upstream.
    """
    _lowerCamelCase : Tuple = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                F'''config.{attribute}''' in modeling_source
                or F'''getattr(config, "{attribute}"''' in modeling_source
                or F'''getattr(self.config, "{attribute}"''' in modeling_source
            ):
                _lowerCamelCase : Union[str, Any] = True
            # Deal with multi-line cases
            elif (
                re.search(
                    RF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''', A_, )
                is not None
            ):
                _lowerCamelCase : str = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    _lowerCamelCase : Optional[Any] = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    _lowerCamelCase : str = [
        '''bos_index''',
        '''eos_index''',
        '''pad_index''',
        '''unk_index''',
        '''mask_index''',
        '''image_size''',
        '''use_cache''',
        '''out_features''',
        '''out_indices''',
    ]
    _lowerCamelCase : Optional[Any] = ['''encoder_no_repeat_ngram_size''']
    # Special cases to be allowed
    _lowerCamelCase : Any = True
    if not attribute_used:
        _lowerCamelCase : Tuple = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                _lowerCamelCase : str = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                _lowerCamelCase : Dict = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                _lowerCamelCase : List[str] = True
            elif attribute.endswith('''_token_id''' ):
                _lowerCamelCase : Optional[int] = True
        # configuration class specific cases
        if not case_allowed:
            _lowerCamelCase : Optional[int] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [] )
            _lowerCamelCase : List[str] = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def snake_case_ ( A_ : Optional[int] ):
    """Return the sorted list of ``__init__`` parameters of a config class
    that are not used anywhere in the corresponding modeling files
    (upstream: ``check_config_attributes_being_used``).

    NOTE(review): mangled — the body reads ``config_class`` (never bound;
    the parameter is ``A_``) and all intermediate results go to
    ``_lowerCamelCase`` locals. Confirm against upstream.
    """
    _lowerCamelCase : Optional[int] = dict(inspect.signature(config_class.__init__ ).parameters )
    _lowerCamelCase : List[Any] = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
    _lowerCamelCase : Union[str, Any] = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    _lowerCamelCase : Union[str, Any] = {}
    if len(config_class.attribute_map ) > 0:
        _lowerCamelCase : List[Any] = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    _lowerCamelCase : Dict = inspect.getsourcefile(A_ )
    _lowerCamelCase : Dict = os.path.dirname(A_ )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    _lowerCamelCase : List[Any] = [os.path.join(A_, A_ ) for fn in os.listdir(A_ ) if fn.startswith('''modeling_''' )]
    # Get the source code strings
    _lowerCamelCase : Dict = []
    for path in modeling_paths:
        if os.path.isfile(A_ ):
            with open(A_ ) as fp:
                modeling_sources.append(fp.read() )
    _lowerCamelCase : Dict = []
    for config_param, default_value in zip(A_, A_ ):
        # `attributes` here is all the variant names for `config_param`
        _lowerCamelCase : Tuple = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(A_, A_, A_, A_ ):
            unused_attributes.append(attributes[0] )
    return sorted(A_ )
def snake_case_ ( ):
    """Walk every registered (non-deprecated) config class, collect unused
    config attributes per class, and raise a ``ValueError`` summarizing any
    offenders (upstream: ``check_config_attributes``).

    NOTE(review): mangled — collections such as
    ``configs_with_unused_attributes`` and ``error`` are read but only ever
    bound to ``_lowerCamelCase`` locals. Confirm against upstream.
    """
    _lowerCamelCase : List[str] = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        _lowerCamelCase : Dict = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ), lambda A_ : inspect.isclass(A_ )
                and issubclass(A_, A_ )
                and inspect.getmodule(A_ ) == inspect.getmodule(_config_class ), )
        ]
        for config_class in config_classes_in_module:
            _lowerCamelCase : Union[str, Any] = check_config_attributes_being_used(A_ )
            if len(A_ ) > 0:
                _lowerCamelCase : Optional[int] = unused_attributes
    if len(A_ ) > 0:
        _lowerCamelCase : Union[str, Any] = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
        for name, attributes in configs_with_unused_attributes.items():
            error += F'''{name}: {attributes}\n'''
        raise ValueError(A_ )
if __name__ == "__main__":
    check_config_attributes()
| 83 |
"""simple docstring"""
from torch import nn
def snake_case_ ( A_ : str ):
    """Return a freshly constructed torch activation module for the name ``A_``.

    Supported names: ``"swish"``/``"silu"`` -> ``nn.SiLU``,
    ``"mish"`` -> ``nn.Mish``, ``"gelu"`` -> ``nn.GELU``.

    Raises:
        ValueError: for any unsupported activation name.
    """
    # Bug fix: the original body tested an unbound name ``act_fn`` while the
    # parameter was called ``A_``, so every call raised NameError. Bind it.
    # (Annotation also corrected from ``int`` to ``str``.)
    act_fn = A_
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'''Unsupported activation function: {act_fn}''' )
| 83 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def snake_case_ ( A_ : str="" ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
return os.path.join(A_, str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class __snake_case ( unittest.TestCase):
    """Tests for the ``AgentAudio`` agent type: round-tripping between a raw
    tensor and its serialized file form.

    NOTE(review): mangled — locals are ``_lowerCamelCase``, call arguments
    ``__lowerCAmelCase`` (unbound), and ``torch.floataa`` appears to be a
    corruption of ``torch.float32``; confirm against upstream.
    """
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Build an AgentAudio from a tensor and check to_string()/to_raw()
        agree with the tensor, and the file outlives the object."""
        _lowerCamelCase : Optional[int] = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
        _lowerCamelCase : List[str] = AgentAudio(__lowerCAmelCase )
        _lowerCamelCase : Dict = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(__lowerCAmelCase , agent_type.to_raw() , atol=1E-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(__lowerCAmelCase ) )
        # Ensure that the file contains the same value as the original tensor
        _lowerCamelCase , _lowerCamelCase : Union[str, Any] = sf.read(__lowerCAmelCase )
        self.assertTrue(torch.allclose(__lowerCAmelCase , torch.tensor(__lowerCAmelCase ) , atol=1E-4 ) )
    def SCREAMING_SNAKE_CASE ( self : Any ):
        """Build an AgentAudio from an existing wav file and check to_raw()
        matches the written samples and to_string() returns the same path."""
        _lowerCamelCase : Tuple = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
        _lowerCamelCase : Union[str, Any] = get_new_path(suffix='''.wav''' )
        sf.write(__lowerCAmelCase , __lowerCAmelCase , 1_6_0_0_0 )
        _lowerCamelCase : Optional[Any] = AgentAudio(__lowerCAmelCase )
        self.assertTrue(torch.allclose(__lowerCAmelCase , agent_type.to_raw() , atol=1E-4 ) )
        self.assertEqual(agent_type.to_string() , __lowerCAmelCase )
@require_vision
@require_torch
class __snake_case ( unittest.TestCase):
    # Exercises AgentImage with tensor, PIL.Image and path inputs.
    # NOTE(review): locals are bound to `_lowerCamelCase` while later lines
    # read `agent_type` / `path` / `image`; as written these tests would
    # raise NameError — verify against the upstream test module.
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Round-trip a random integer image tensor through AgentImage."""
        _lowerCamelCase : Dict = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
        _lowerCamelCase : Tuple = AgentImage(__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(__lowerCAmelCase , agent_type._tensor , atol=1E-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(__lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Build AgentImage from a PIL image; to_string() reuses the source path."""
        _lowerCamelCase : str = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
        _lowerCamelCase : Optional[int] = Image.open(__lowerCAmelCase )
        _lowerCamelCase : List[str] = AgentImage(__lowerCAmelCase )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(__lowerCAmelCase ) )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Build AgentImage from a path; to_string() points at a different file."""
        _lowerCamelCase : List[str] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
        _lowerCamelCase : Dict = Image.open(__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = AgentImage(__lowerCAmelCase )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(__lowerCAmelCase ) )
class __snake_case ( unittest.TestCase):
    # AgentText should equal the wrapped string in both of its views.
    # NOTE(review): the local is bound to `_lowerCamelCase` while later lines
    # read `agent_type`; as written this test would raise NameError — verify.
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """to_string() and to_raw() of AgentText both equal the source string."""
        _lowerCamelCase : Optional[int] = '''Hey!'''
        _lowerCamelCase : int = AgentText(__lowerCAmelCase )
        self.assertEqual(__lowerCAmelCase , agent_type.to_string() )
        self.assertEqual(__lowerCAmelCase , agent_type.to_raw() )
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
| 83 |
"""simple docstring"""
def snake_case_ ( input_a : int, input_b : int ):
    '''NOR gate: return 1 when both inputs are 0, otherwise 0.

    Fix: the original declared both parameters with the same name `A_`
    (a syntax error) and compared an undefined `input_a` against itself;
    the inputs are now named distinctly and compared against each other.
    '''
    return int(input_a == input_b == 0 )
def snake_case_ ( ):
    '''Print the truth table of a NOR gate for all four input combinations.

    NOTE(review): this calls `nor_gate`, which is not defined in this module
    as written (the gate above is also named `snake_case_`) — confirm the
    intended function name against the upstream script.
    '''
    print('''Truth Table of NOR Gate:''' )
    print('''| Input 1 | Input 2 | Output |''' )
    print(F'''| 0 | 0 | {nor_gate(0, 0 )} |''' )
    print(F'''| 0 | 1 | {nor_gate(0, 1 )} |''' )
    print(F'''| 1 | 0 | {nor_gate(1, 0 )} |''' )
    print(F'''| 1 | 1 | {nor_gate(1, 1 )} |''' )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not defined in this module as written —
    # confirm the intended entry point.
    main()
| 83 | 1 |
"""simple docstring"""
import numpy as np
# 5x5 Polybius square over the lowercase alphabet; "j" is omitted (the
# cipher below folds "j" into "i") so the remaining 25 letters fit the grid.
lowerCAmelCase__ = [
    ['''a''', '''b''', '''c''', '''d''', '''e'''],
    ['''f''', '''g''', '''h''', '''i''', '''k'''],
    ['''l''', '''m''', '''n''', '''o''', '''p'''],
    ['''q''', '''r''', '''s''', '''t''', '''u'''],
    ['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class __snake_case :
    # Polybius-square cipher: maps letters to 1-based (row, column)
    # coordinates on the SQUARE table and back.
    # NOTE(review): throughout this class, locals are bound to
    # `_lowerCamelCase` while later statements read the original names
    # (`letter`, `numbers`, `encoded_message`, ...), and `__lowerCAmelCase`
    # in __init__ is undefined (name-mangled) — as written every method
    # would raise NameError; verify against the upstream implementation.
    def __init__( self : Optional[Any] ):
        """Wrap the lookup table in a numpy array (presumably the module-level SQUARE)."""
        _lowerCamelCase : List[str] = np.array(__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str ):
        """Return the 1-based (row, column) indexes of a letter in the square."""
        _lowerCamelCase , _lowerCamelCase : str = np.where(letter == self.SQUARE )
        _lowerCamelCase : int = np.concatenate([indexa + 1, indexa + 1] )
        return indexes
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
        """Return the letter stored at 1-based (row, column) coordinates."""
        _lowerCamelCase : Any = self.SQUARE[indexa - 1, indexa - 1]
        return letter
    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str ):
        """Encode a message (lowercased, spaces removed, j folded into i)."""
        _lowerCamelCase : Tuple = message.lower()
        _lowerCamelCase : List[Any] = message.replace(''' ''' , '''''' )
        _lowerCamelCase : str = message.replace('''j''' , '''i''' )
        _lowerCamelCase : Optional[Any] = np.empty((2, len(__lowerCAmelCase )) )
        for letter_index in range(len(__lowerCAmelCase ) ):
            _lowerCamelCase : int = self.letter_to_numbers(message[letter_index] )
            _lowerCamelCase : Dict = numbers[0]
            _lowerCamelCase : Optional[int] = numbers[1]
        _lowerCamelCase : Tuple = first_step.reshape(2 * len(__lowerCAmelCase ) )
        _lowerCamelCase : str = ''''''
        for numbers_index in range(len(__lowerCAmelCase ) ):
            _lowerCamelCase : Any = int(second_step[numbers_index * 2] )
            _lowerCamelCase : Tuple = int(second_step[(numbers_index * 2) + 1] )
            _lowerCamelCase : int = self.numbers_to_letter(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : List[Any] = encoded_message + letter
        return encoded_message
    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str ):
        """Decode a message produced by the encoding method above."""
        _lowerCamelCase : Optional[int] = message.lower()
        # NOTE(review): the result of this replace() is discarded — spaces
        # are not actually stripped before decoding; confirm intent.
        message.replace(''' ''' , '''''' )
        _lowerCamelCase : Tuple = np.empty(2 * len(__lowerCAmelCase ) )
        for letter_index in range(len(__lowerCAmelCase ) ):
            _lowerCamelCase : Optional[int] = self.letter_to_numbers(message[letter_index] )
            _lowerCamelCase : Optional[int] = numbers[0]
            _lowerCamelCase : Union[str, Any] = numbers[1]
        _lowerCamelCase : Tuple = first_step.reshape((2, len(__lowerCAmelCase )) )
        _lowerCamelCase : Union[str, Any] = ''''''
        for numbers_index in range(len(__lowerCAmelCase ) ):
            _lowerCamelCase : Dict = int(second_step[0, numbers_index] )
            _lowerCamelCase : Union[str, Any] = int(second_step[1, numbers_index] )
            _lowerCamelCase : Dict = self.numbers_to_letter(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Any = decoded_message + letter
        return decoded_message
| 83 |
"""simple docstring"""
from __future__ import annotations
def snake_case_ ( A_ : list[list[int]] ):
'''simple docstring'''
for i in range(1, len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1, len(A_ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1, len(A_ ) ):
for j in range(1, len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 83 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# This conversion script only supports fairseq >= 0.12.2 and < v2.
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
    raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
    raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
# Sample text and language id used by the output-equivalence check below.
lowerCAmelCase__ = '''Hello, World!'''
lowerCAmelCase__ = '''en_XX'''
def snake_case_ ( A_ : str, A_ : str, A_ : bool ):
    '''Convert a fairseq X-MOD checkpoint to a HF Transformers model, verify
    both produce the same outputs, and save the converted model.

    NOTE(review): all three parameters share the name `A_` (checkpoint path,
    dump folder, classification-head flag — a syntax error as written), and
    most results are bound to `_lowerCamelCase` while later statements read
    the originally-named variables (`xmod`, `config`, `model`, ...); as
    written this cannot run — verify against the upstream conversion script.
    '''
    _lowerCamelCase : Optional[Any] = Path('''data_bin''' )
    _lowerCamelCase : List[str] = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(A_ ).parent ), checkpoint_file=Path(A_ ).name, _name='''xmod_base''', arch='''xmod_base''', task='''multilingual_masked_lm''', data_name_or_path=str(A_ ), bpe='''sentencepiece''', sentencepiece_model=str(Path(A_ ).parent / '''sentencepiece.bpe.model''' ), src_dict=str(data_dir / '''dict.txt''' ), )
    xmod.eval() # disable dropout
    print(A_ )
    _lowerCamelCase : Optional[int] = xmod.model.encoder.sentence_encoder
    # Build the HF config from the fairseq model's hyper-parameters.
    _lowerCamelCase : str = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=5_14, type_vocab_size=1, layer_norm_eps=1E-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, '''bottleneck''', 2 ), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
    if classification_head:
        _lowerCamelCase : List[str] = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''', A_ )
    _lowerCamelCase : Optional[int] = XmodForSequenceClassification(A_ ) if classification_head else XmodForMaskedLM(A_ )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    _lowerCamelCase : Optional[int] = xmod_sent_encoder.embed_tokens.weight
    _lowerCamelCase : int = xmod_sent_encoder.embed_positions.weight
    _lowerCamelCase : Optional[int] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
    _lowerCamelCase : Optional[int] = xmod_sent_encoder.layernorm_embedding.weight
    _lowerCamelCase : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        _lowerCamelCase : Optional[int] = model.roberta.encoder.layer[i]
        _lowerCamelCase : Any = xmod_sent_encoder.layers[i]
        # self attention
        _lowerCamelCase : int = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''' )
        _lowerCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.weight
        _lowerCamelCase : List[Any] = xmod_layer.self_attn.q_proj.bias
        _lowerCamelCase : Optional[int] = xmod_layer.self_attn.k_proj.weight
        _lowerCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias
        _lowerCamelCase : str = xmod_layer.self_attn.v_proj.weight
        _lowerCamelCase : List[Any] = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        _lowerCamelCase : Optional[Any] = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
        _lowerCamelCase : Dict = xmod_layer.self_attn.out_proj.weight
        _lowerCamelCase : Tuple = xmod_layer.self_attn.out_proj.bias
        _lowerCamelCase : Optional[Any] = xmod_layer.self_attn_layer_norm.weight
        _lowerCamelCase : Tuple = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        _lowerCamelCase : List[str] = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''' )
        _lowerCamelCase : int = xmod_layer.fca.weight
        _lowerCamelCase : int = xmod_layer.fca.bias
        # output
        _lowerCamelCase : int = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
        _lowerCamelCase : Tuple = xmod_layer.fca.weight
        _lowerCamelCase : Dict = xmod_layer.fca.bias
        _lowerCamelCase : Tuple = xmod_layer.final_layer_norm.weight
        _lowerCamelCase : str = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            _lowerCamelCase : List[str] = xmod_layer.adapter_layer_norm.weight
            _lowerCamelCase : str = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError('''Lists of language adapters do not match.''' )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            _lowerCamelCase : List[str] = bert_output.adapter_modules[lang_code]
            _lowerCamelCase : Optional[Any] = xmod_layer.adapter_modules[lang_code]
            _lowerCamelCase : Tuple = from_adapter.fca.weight
            _lowerCamelCase : str = from_adapter.fca.bias
            _lowerCamelCase : int = from_adapter.fca.weight
            _lowerCamelCase : List[str] = from_adapter.fca.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        _lowerCamelCase : List[str] = xmod_sent_encoder.layer_norm.weight
        _lowerCamelCase : Union[str, Any] = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        _lowerCamelCase : Tuple = xmod.model.classification_heads['''mnli'''].dense.weight
        _lowerCamelCase : Optional[Any] = xmod.model.classification_heads['''mnli'''].dense.bias
        _lowerCamelCase : Any = xmod.model.classification_heads['''mnli'''].out_proj.weight
        _lowerCamelCase : int = xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        _lowerCamelCase : str = xmod.model.encoder.lm_head.dense.weight
        _lowerCamelCase : List[Any] = xmod.model.encoder.lm_head.dense.bias
        _lowerCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight
        _lowerCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.bias
        _lowerCamelCase : int = xmod.model.encoder.lm_head.weight
        _lowerCamelCase : Any = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    _lowerCamelCase : int = xmod.encode(A_ ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(A_ )
    _lowerCamelCase : List[Any] = model(A_ )[0]
    if classification_head:
        _lowerCamelCase : str = xmod.model.classification_heads['''mnli'''](xmod.extract_features(A_ ) )
    else:
        _lowerCamelCase : Optional[int] = xmod.model(A_, lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape, their_output.shape )
    _lowerCamelCase : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    _lowerCamelCase : List[Any] = torch.allclose(A_, A_, atol=1E-3 )
    print('''Do both models output the same tensors?''', '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    Path(A_ ).mkdir(parents=A_, exist_ok=A_ )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(A_ )
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint/output paths and run conversion.
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    lowerCAmelCase__ = parser.parse_args()
    # NOTE(review): `parser` / `args` / `convert_xmod_checkpoint_to_pytorch`
    # are read here, but the assignments above bind `lowerCAmelCase__` and
    # the converter is defined as `snake_case_` — verify names upstream.
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 83 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
class __snake_case ( Generic[T]):
def __init__( self : int , __lowerCAmelCase : T ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = data
_lowerCamelCase : Node[T] | None = None
def __str__( self : Optional[Any] ):
"""simple docstring"""
return f'''{self.data}'''
class __snake_case ( Generic[T]):
    # Singly linked LIFO stack.
    # NOTE(review): several statements bind to `_lowerCamelCase` while later
    # code reads `self.top` / `node` / `pop_node`, and push references `Node`
    # (the node class above is also named `__snake_case`); as written these
    # methods would raise NameError — verify against upstream.
    def __init__( self : int ):
        """Create an empty stack (no top node)."""
        _lowerCamelCase : Node[T] | None = None
    def __iter__( self : str ):
        """Yield the stored data from the top of the stack downwards."""
        _lowerCamelCase : List[str] = self.top
        while node:
            yield node.data
            _lowerCamelCase : Any = node.next
    def __str__( self : int ):
        """Arrow-joined representation of the stack, top element first."""
        return "->".join([str(__lowerCAmelCase ) for item in self] )
    def __len__( self : int ):
        """Number of items currently on the stack."""
        return len(tuple(iter(self ) ) )
    def SCREAMING_SNAKE_CASE ( self : int ):
        """Return True when the stack holds no items."""
        return self.top is None
    def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : T ):
        """Push a value onto the top of the stack."""
        _lowerCamelCase : Tuple = Node(__lowerCAmelCase )
        if not self.is_empty():
            _lowerCamelCase : Optional[int] = self.top
        _lowerCamelCase : List[str] = node
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Pop and return the top value; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('''pop from empty stack''' )
        assert isinstance(self.top , __lowerCAmelCase )
        _lowerCamelCase : Any = self.top
        _lowerCamelCase : Any = self.top.next
        return pop_node.data
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Return the top value without removing it; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('''peek from empty stack''' )
        assert self.top is not None
        return self.top.data
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Drop all items by clearing the top reference."""
        _lowerCamelCase : List[str] = None
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 83 | 1 |
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __snake_case ( tf.keras.optimizers.schedules.LearningRateSchedule):
    # Polynomial-warmup wrapper around another learning-rate schedule.
    # NOTE(review): __init__ declares several duplicate `__lowerCAmelCase`
    # parameters (a syntax error as written) and binds to `_lowerCamelCase`
    # rather than attributes, while __call__/get_config read
    # `self.initial_learning_rate` etc. — verify against upstream.
    def __init__( self : int , __lowerCAmelCase : float , __lowerCAmelCase : Callable , __lowerCAmelCase : int , __lowerCAmelCase : float = 1.0 , __lowerCAmelCase : str = None , ):
        """Store the warmup hyper-parameters and the post-warmup schedule."""
        super().__init__()
        _lowerCamelCase : str = initial_learning_rate
        _lowerCamelCase : List[str] = warmup_steps
        _lowerCamelCase : Union[str, Any] = power
        _lowerCamelCase : Any = decay_schedule_fn
        _lowerCamelCase : Optional[int] = name
    def __call__( self : Any , __lowerCAmelCase : int ):
        """Return the LR for a step: polynomial warmup, then the decay schedule."""
        with tf.name_scope(self.name or '''WarmUp''' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            _lowerCamelCase : Dict = tf.cast(__lowerCAmelCase , tf.floataa )
            _lowerCamelCase : str = tf.cast(self.warmup_steps , tf.floataa )
            _lowerCamelCase : Dict = global_step_float / warmup_steps_float
            _lowerCamelCase : Union[str, Any] = self.initial_learning_rate * tf.math.pow(__lowerCAmelCase , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=__lowerCAmelCase , )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Serialize the schedule's constructor arguments."""
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def snake_case_ ( A_ : float, A_ : int, A_ : int, A_ : float = 0.0, A_ : float = 0.9, A_ : float = 0.999, A_ : float = 1E-8, A_ : Optional[float] = None, A_ : Optional[float] = None, A_ : float = 0.0, A_ : float = 1.0, A_ : Optional[List[str]] = None, ):
    '''Build an optimizer plus LR schedule: polynomial decay, optional warmup,
    and AdamW-style weight decay when a positive decay rate is given.

    NOTE(review): every parameter is named `A_` (a syntax error as written)
    and results are bound to `_lowerCamelCase` while the return statement
    reads `optimizer` / `lr_schedule` — verify against the upstream helper.
    '''
    _lowerCamelCase : int = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=A_, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=A_, )
    if num_warmup_steps:
        _lowerCamelCase : Tuple = WarmUp(
            initial_learning_rate=A_, decay_schedule_fn=A_, warmup_steps=A_, )
    if weight_decay_rate > 0.0:
        _lowerCamelCase : Tuple = AdamWeightDecay(
            learning_rate=A_, weight_decay_rate=A_, beta_a=A_, beta_a=A_, epsilon=A_, clipnorm=A_, global_clipnorm=A_, exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''], include_in_weight_decay=A_, )
    else:
        _lowerCamelCase : Union[str, Any] = tf.keras.optimizers.Adam(
            learning_rate=A_, beta_a=A_, beta_a=A_, epsilon=A_, clipnorm=A_, global_clipnorm=A_, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class __snake_case ( _lowercase):
    # Adam with decoupled (AdamW-style) weight decay layered on the base Adam.
    # NOTE(review): several methods declare duplicate `__lowerCAmelCase`
    # parameters (a syntax error as written) and bind results to
    # `_lowerCamelCase` while later code reads the original names
    # (`weight_decay_rate`, `var`, `coefficients`, ...) — verify upstream.
    def __init__( self : int , __lowerCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_01 , __lowerCAmelCase : float = 0.9 , __lowerCAmelCase : float = 0.9_99 , __lowerCAmelCase : float = 1E-7 , __lowerCAmelCase : bool = False , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : str = "AdamWeightDecay" , **__lowerCAmelCase : int , ):
        """Forward Adam hyper-parameters and store decay rate + name filters."""
        super().__init__(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
        _lowerCamelCase : Dict = weight_decay_rate
        _lowerCamelCase : List[str] = include_in_weight_decay
        _lowerCamelCase : Tuple = exclude_from_weight_decay
    @classmethod
    def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __lowerCAmelCase : int ):
        """Recreate the optimizer from a config, resolving the WarmUp schedule."""
        _lowerCamelCase : Optional[int] = {'''WarmUp''': WarmUp}
        return super(__lowerCAmelCase , cls ).from_config(__lowerCAmelCase , custom_objects=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Dict ):
        """Expose the decay rate as a constant in the per-device apply state."""
        super(__lowerCAmelCase , self )._prepare_local(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : List[str] = tf.constant(
            self.weight_decay_rate , name='''adam_weight_decay_rate''' )
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ):
        """Apply decoupled weight decay to a variable unless it is excluded."""
        _lowerCamelCase : Optional[int] = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
        return tf.no_op()
    def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str]=None , **__lowerCAmelCase : int ):
        """Unzip (grad, var) pairs and delegate to the base apply_gradients."""
        _lowerCamelCase , _lowerCamelCase : Tuple = list(zip(*__lowerCAmelCase ) )
        return super(__lowerCAmelCase , self ).apply_gradients(zip(__lowerCAmelCase , __lowerCAmelCase ) , name=__lowerCAmelCase , **__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
        """Fetch (and cache in apply_state) the LR for a device/dtype pair."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        _lowerCamelCase : List[str] = apply_state or {}
        _lowerCamelCase : Any = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            _lowerCamelCase : Optional[Any] = self._fallback_apply_state(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : List[Any] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None ):
        """Dense update: decay the weights first, then run the base Adam step."""
        _lowerCamelCase , _lowerCamelCase : str = self._get_lr(var.device , var.dtype.base_dtype , __lowerCAmelCase )
        _lowerCamelCase : Tuple = self._decay_weights_op(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        with tf.control_dependencies([decay] ):
            return super(__lowerCAmelCase , self )._resource_apply_dense(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=None ):
        """Sparse update: decay the weights first, then run the base Adam step."""
        _lowerCamelCase , _lowerCamelCase : Tuple = self._get_lr(var.device , var.dtype.base_dtype , __lowerCAmelCase )
        _lowerCamelCase : Dict = self._decay_weights_op(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        with tf.control_dependencies([decay] ):
            return super(__lowerCAmelCase , self )._resource_apply_sparse(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Serialize the optimizer config including the weight-decay rate."""
        _lowerCamelCase : Union[str, Any] = super().get_config()
        config.update({'''weight_decay_rate''': self.weight_decay_rate} )
        return config
    def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Union[str, Any] ):
        """Return True when weight decay should be applied to this variable name."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(__lowerCAmelCase , __lowerCAmelCase ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(__lowerCAmelCase , __lowerCAmelCase ) is not None:
                    return False
        return True
class __snake_case ( _lowercase):
    # Accumulates gradients across multiple steps (gradient accumulation).
    # NOTE(review): some statements bind to `_lowerCamelCase` while later
    # code reads `self._gradients` / `self._accum_steps`; as written the
    # attributes are never set — verify against upstream.
    def __init__( self : Optional[int] ):
        """Start with no tracked gradients and a lazily-created step counter."""
        _lowerCamelCase : Optional[int] = []
        _lowerCamelCase : Optional[int] = None
    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Number of accumulation steps performed so far (variable created lazily)."""
        if self._accum_steps is None:
            _lowerCamelCase : Optional[Any] = tf.Variable(
                tf.constant(0 , dtype=tf.intaa ) , trainable=__lowerCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()
    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Current accumulated gradient values; raises before the first call."""
        if not self._gradients:
            raise ValueError('''The accumulator should be called first to initialize the gradients''' )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self : Union[str, Any] , __lowerCAmelCase : Any ):
        """Add one batch of gradients into the running accumulators."""
        if not self._gradients:
            _lowerCamelCase : Union[str, Any] = self.step # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(__lowerCAmelCase ) , trainable=__lowerCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(__lowerCAmelCase ) != len(self._gradients ):
            raise ValueError(f'''Expected {len(self._gradients )} gradients, but got {len(__lowerCAmelCase )}''' )
        for accum_gradient, gradient in zip(self._gradients , __lowerCAmelCase ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(__lowerCAmelCase )
        self._accum_steps.assign_add(1 )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Zero the step counter and every accumulated gradient."""
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(__lowerCAmelCase ) )
| 83 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Root of the transformers sources checked by this script.
lowerCAmelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = importlib.util.spec_from_file_location(
    '''transformers''',
    os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
lowerCAmelCase__ = spec.loader.load_module()
lowerCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# NOTE(review): the pattern is a non-raw string relying on escape sequences
# like "\[" — prefer a raw string r'...'; left unchanged here.
lowerCAmelCase__ = re.compile('''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
# Config classes exempt from the docstring-checkpoint check.
lowerCAmelCase__ = {
    '''CLIPConfigMixin''',
    '''DecisionTransformerConfigMixin''',
    '''EncoderDecoderConfigMixin''',
    '''RagConfigMixin''',
    '''SpeechEncoderDecoderConfigMixin''',
    '''VisionEncoderDecoderConfigMixin''',
    '''VisionTextDualEncoderConfigMixin''',
}
def snake_case_ ( ):
    '''Check that each config class docstring links a checkpoint whose URL
    matches its checkpoint name; raise ValueError listing the offenders.

    NOTE(review): locals are bound to `_lowerCamelCase` while later lines
    read `checkpoints`, `ckpt_name`, `checkpoint_found`,
    `configs_without_checkpoint`, etc. — as written this would raise
    NameError; verify against the upstream checker.
    '''
    _lowerCamelCase : Any = []
    for config_class in list(CONFIG_MAPPING.values() ):
        _lowerCamelCase : Tuple = False
        # source code of `config_class`
        _lowerCamelCase : int = inspect.getsource(A_ )
        _lowerCamelCase : str = _re_checkpoint.findall(A_ )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            _lowerCamelCase , _lowerCamelCase : Tuple = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            _lowerCamelCase : Tuple = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                _lowerCamelCase : Union[str, Any] = True
                break
        _lowerCamelCase : Tuple = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(A_ )
    if len(A_ ) > 0:
        _lowerCamelCase : Union[str, Any] = '''\n'''.join(sorted(A_ ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
    # NOTE(review): `check_config_docstrings_have_checkpoints` is not defined
    # in this module as written (the checker above is named `snake_case_`) —
    # confirm the intended entry point.
    check_config_docstrings_have_checkpoints()
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
def __init__( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int]=1_3 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Any=[1_0, 2_0, 3_0, 4_0] , __lowerCAmelCase : List[Any]=[2, 2, 3, 2] , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Tuple=3_7 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : List[Any]=1_0 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Any=["stage2", "stage3", "stage4"] , __lowerCAmelCase : Any=3 , __lowerCAmelCase : int=None , ):
"""simple docstring"""
_lowerCamelCase : Tuple = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : int = image_size
_lowerCamelCase : str = num_channels
_lowerCamelCase : Any = num_stages
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : Any = depths
_lowerCamelCase : List[Any] = is_training
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Tuple = out_features
_lowerCamelCase : Union[str, Any] = num_labels
_lowerCamelCase : List[str] = scope
_lowerCamelCase : str = num_stages
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Any = None
if self.use_labels:
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__lowerCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=__lowerCAmelCase , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE(self, config, pixel_values, labels):
    """Instantiate UperNetForSemanticSegmentation and check the logits shape.

    The original declared three parameters all named `__lowerCAmelCase`
    (a SyntaxError); restored distinct names matching the tuple produced by
    `prepare_config_and_inputs`.
    """
    model = UperNetForSemanticSegmentation(config=config)
    # NOTE(review): `torch_device` is the conventional target here — confirm import.
    model.to(torch_device)
    model.eval()
    result = model(pixel_values)
    self.parent.assertEqual(
        result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
    )
def SCREAMING_SNAKE_CASE(self):
    """Return (config, inputs_dict) in the format the common tests expect.

    The original unpacked the tuple into three locals all named
    `_lowerCamelCase`, leaving `config` and `pixel_values` undefined below.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
    """Model tests for UperNetForSemanticSegmentation.

    NOTE(review): this block comes from an obfuscated dump — every class-level
    flag below shares the name `snake_case__` (only the last binding survives)
    and locals are bound to `_lowerCamelCase` / the undefined `__lowerCAmelCase`.
    The comments below describe the apparent intent; confirm against the
    canonical `test_modeling_upernet.py`.
    """

    # presumably all_model_classes
    snake_case__ : Union[str, Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    # presumably pipeline_model_mapping
    snake_case__ : Optional[Any] = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    # NOTE(review): six distinct test-suite flags were collapsed onto one name
    snake_case__ : Union[str, Any] = False
    snake_case__ : Optional[int] = False
    snake_case__ : Union[str, Any] = False
    snake_case__ : Union[str, Any] = False
    snake_case__ : int = False
    snake_case__ : Union[str, Any] = False

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """setUp: build the model tester and the config tester."""
        # NOTE(review): `__lowerCAmelCase` is undefined — presumably
        # config_class=UperNetConfig, has_text_modality=False.
        _lowerCamelCase : Union[str, Any] = UperNetModelTester(self )
        _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Run the standard configuration round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Intentionally empty: common config properties are not applicable."""
        return

    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Check that the forward signature's first argument is `pixel_values`."""
        _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
            _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : Tuple = [*signature.parameters.keys()]
            _lowerCamelCase : Optional[Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Exercise the semantic-segmentation forward pass via the model tester."""
        _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )

    @unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Skipped: UperNet has no inputs_embeds."""
        pass

    @unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Skipped: no input/output embeddings."""
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def SCREAMING_SNAKE_CASE ( self : str ):
        """Skipped: no base model."""
        pass

    @unittest.skip(reason='''UperNet does not have a base model''' )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Skipped: no base model."""
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Skipped: incompatible with nn.DataParallel."""
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Skipped pending a smaller common-test model."""
        pass

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """Check hidden-state outputs: stage count and first feature-map size."""
        # NOTE(review): the inner helper declares three parameters all named
        # `__lowerCAmelCase`, which is a SyntaxError — presumably
        # (model_class, config, inputs_dict) in the canonical test.
        def check_hidden_states_output(__lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ):
            _lowerCamelCase : Tuple = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                _lowerCamelCase : List[Any] = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
            _lowerCamelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _lowerCamelCase : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        _lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # presumably inputs_dict["output_hidden_states"] = True
            _lowerCamelCase : Union[str, Any] = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCamelCase : List[str] = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Check that every trainable parameter is zero-initialized under _config_zero_init."""
        _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        _lowerCamelCase : Dict = _config_zero_init(__lowerCAmelCase )
        _lowerCamelCase : Any = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            _lowerCamelCase : Union[str, Any] = model_class(config=__lowerCAmelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )

    @unittest.skip(reason='''UperNet does not have tied weights''' )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Skipped: no tied weights."""
        pass

    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def snake_case_():
    """Download and return the ADE20k validation image used by the integration tests.

    Fix: the original opened the undefined name `A_` instead of the path
    returned by `hf_hub_download`.
    """
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''', repo_type='''dataset''', filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath).convert('''RGB''')
    return image
@require_torch
@require_vision
@slow
class __snake_case(unittest.TestCase):
    """Integration tests running real UperNet checkpoints on a real image.

    Fixes vs. the obfuscated dump: both methods were named
    `SCREAMING_SNAKE_CASE` (the second silently shadowed the first, and
    neither matched unittest's `test_*` discovery), locals were bound to
    `_lowerCamelCase`, and `__lowerCAmelCase` stood in for `torch_device`.
    """

    def test_inference_swin_backbone(self):
        """Check logits of the Swin-tiny UperNet checkpoint on the ADE20k image."""
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        """Check logits of the ConvNext-tiny UperNet checkpoint on the ADE20k image."""
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 83 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
# NOTE(review): mangled flag name — in the canonical file this is a module-level
# boolean (e.g. a "run slow tests" switch); nothing in this chunk reads it.
lowerCAmelCase__ = False
class __snake_case ( unittest.TestCase):
    """Placeholder for the fast pipeline tests; only the nightly GPU tests below are implemented."""
    pass
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase):
    """Nightly GPU integration tests for VersatileDiffusionTextToImagePipeline.

    NOTE(review): this block comes from an obfuscated dump — every local is
    bound to `_lowerCamelCase` while later lines read the canonical names
    (`pipe`, `generator`, `image`, `new_image`, `image_slice`, `expected_slice`),
    and `__lowerCAmelCase` stands in for `torch_device` / `False` / `tmpdirname`.
    As written the methods raise NameError; the comments record the apparent intent.
    """

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """tearDown: free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE ( self : int ):
        """Check that save_pretrained/from_pretrained round-trips the pipeline output."""
        _lowerCamelCase : int = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : str = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Dict = torch.manual_seed(0 )
        _lowerCamelCase : List[Any] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__lowerCAmelCase )
            _lowerCamelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCAmelCase )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        # presumably: re-seed and regenerate with the reloaded pipeline
        _lowerCamelCase : int = generator.manual_seed(0 )
        _lowerCamelCase : List[str] = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Check a 512x512 fp16 generation against a reference pixel slice."""
        _lowerCamelCase : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(__lowerCAmelCase )
        pipe.set_progress_bar_config(disable=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = '''A painting of a squirrel eating a burger '''
        _lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        _lowerCamelCase : int = pipe(
            prompt=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
        _lowerCamelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        _lowerCamelCase : Dict = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 83 | 1 |
"""simple docstring"""
def snake_case_(A_: int = 1_000_000) -> int:
    """Count reduced proper fractions n/d with d <= A_ (Project Euler 72),
    i.e. sum(phi(d) for d in 2..A_).

    Fixes vs. the obfuscated original: `limit` was undefined (renamed
    parameter), `float(A_)` should have been `float(n)`, the sieve's step was
    `A_` instead of `p`, and the main guard called the undefined `solution()`.

    >>> snake_case_(10)
    31
    """
    # sieve of Eratosthenes over the odd numbers up to and including A_
    primes = set(range(3, A_ + 1, 2))
    primes.add(2)
    for p in range(3, A_ + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, A_ + 1, p)))
    # phi[n] starts at n; multiply by (1 - 1/p) for every prime p dividing n
    phi = [float(n) for n in range(A_ + 1)]
    for p in primes:
        for n in range(p, A_ + 1, p):
            phi[n] *= 1 - 1 / p
    # round (not truncate) so tiny negative float error cannot lose a unit
    return round(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{snake_case_() = }")
| 83 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
# NOTE(review): obfuscated dump — every binding below is named `lowerCAmelCase__`
# while later lines read canonical names (`sess_opt`, `execution_provider`,
# `sess`, `run_opt`, `batch`, `sequence`, `input_ids`, `attention_mask`,
# `token_type_ids`, `start_time`, `max_iters`); as written this script raises
# NameError. The three string flags were presumably os.environ TensorRT switches.
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
# session options with graph optimizations disabled (pure provider benchmark)
lowerCAmelCase__ = ort.SessionOptions()
lowerCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
lowerCAmelCase__ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
lowerCAmelCase__ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
lowerCAmelCase__ = ort.RunOptions()
# fixed-size all-ones BERT-style inputs: (batch=1, sequence=128)
lowerCAmelCase__ = 128
lowerCAmelCase__ = 1
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
lowerCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print('''Start inference...''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = 2000
lowerCAmelCase__ = {}
# NOTE(review): `iter` shadows the builtin and the run result is rebound each
# iteration — only wall-clock time is measured.
for iter in range(max_iters):
    lowerCAmelCase__ = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 | 1 |
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def snake_case_(A_: list) -> list:
    """Sort a sequence containing only the flag values 0, 1 and 2 in place
    (Dutch national flag problem, single pass) and return it.

    Fixes vs. the obfuscated original: the three color constants and the
    `colors` tuple were all bound to mangled names (NameError), the swap
    targets were lost, and the ValueError was raised with the sequence
    instead of the message.

    Raises:
        ValueError: if the sequence contains a value outside `colors`.
    """
    if not A_:
        return []
    if len(A_) == 1:
        return list(A_)
    low = 0
    high = len(A_) - 1
    mid = 0
    while mid <= high:
        if A_[mid] == colors[0]:
            A_[low], A_[mid] = A_[mid], A_[low]
            low += 1
            mid += 1
        elif A_[mid] == colors[1]:
            mid += 1
        elif A_[mid] == colors[2]:
            A_[mid], A_[high] = A_[high], A_[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return A_


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{snake_case_(unsorted)}")
| 83 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative distance step/(max_step-1) after which the complex
    number (x + y*i) diverges under the Mandelbrot iteration; points that never
    diverge within max_step iterations get distance 1.

    Fix: the original declared all three parameters as `A_` (a SyntaxError)
    while the body read `x`, `y` and `max_step`. The functions in this module
    are restored to the canonical names their internal call sites use.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: the Mandelbrot set is black, everything else white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Hue-based coloring mapping the distance onto the HSV color wheel."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set as a PIL image of the given size and viewport."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # colored version, full figure
    img = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
| 83 | 1 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple:
    """Return the two roots of a*x^2 + b*x + c = 0.

    Purely real roots are returned as floats, otherwise as complex numbers.
    Fix: the original declared all three parameters as `A_` (a SyntaxError)
    while the body — and the keyword call below — used `a`, `b`, `c`, and
    both roots were bound to the same mangled name.

    Raises:
        ValueError: if ``a`` is zero (the equation is not quadratic).
    """
    if a == 0:
        raise ValueError('''Coefficient \'a\' must not be zero.''')
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    # drop the imaginary part when the root is purely real
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    """Solve 5x^2 + 6x + 1 = 0 and print both solutions."""
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f'''The solutions are: {solution_1} and {solution_2}''')


if __name__ == "__main__":
    main()
| 83 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def snake_case_(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line, optionally padding to ``max_length``.

    Fix: the original declared every parameter as `A_` (a SyntaxError) while
    the body read `line`, `pad_to_max_length`, etc.; distinct names restored
    from the surviving references and the call sites in the dataset class.
    """
    # BART tokenizers want a leading space marker unless the line already has one
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(''' ''') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding='''max_length''' if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


# canonical alias matching the call sites inside the dataset class below
encode_line = snake_case_
def snake_case_(input_ids, pad_token_id, attention_mask=None):
    """Drop the columns of a batch that contain only ``pad_token_id``.

    Fix: the original declared all parameters as `A_` (a SyntaxError);
    distinct names restored from the body's surviving references.

    Returns the trimmed ``input_ids``, or a (input_ids, attention_mask)
    tuple when an attention mask is supplied.
    """
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


# canonical alias matching the call sites in the dataset's collate function
trim_batch = snake_case_
class __snake_case(_lowercase):
    """Line-based seq2seq dataset reading `<type_path>.source` / `<type_path>.target`.

    Reconstructed from an obfuscated dump whose method signatures declared
    duplicate `__lowerCAmelCase` parameters (a SyntaxError) and whose `self.`
    assignment targets were lost; names restored from the surviving attribute
    reads (`self.src_file`, `self.tokenizer`, ...) and internal call sites
    (`get_char_lens`, `encode_line`, `trim_batch`).
    """

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + '''.source''')
        self.tgt_file = Path(data_dir).joinpath(type_path + '''.target''')
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f'''found empty line in {self.src_file}'''
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        """Return tokenized source/target tensors for one example."""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip('''\n''')
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('''\n''')
        assert source_line, f'''empty source line for index {index}'''
        assert tgt_line, f'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right; RAG tokenizers wrap two tokenizers
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, '''right''')
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, '''right''')
        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Return the character length of every line in ``data_file``."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        """Stack a list of examples and trim all-padding columns."""
        input_ids = torch.stack([x['''input_ids'''] for x in batch])
        masks = torch.stack([x['''attention_mask'''] for x in batch])
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
lowerCAmelCase__ = getLogger(__name__)
# `logger` is the name the helper functions below actually reference
logger = lowerCAmelCase__


def snake_case_(A_: List[List]) -> list:
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return list(itertools.chain.from_iterable(A_))
def snake_case_(A_: str) -> None:
    """Write the repository's git metadata to ``<A_>/git_log.json``.

    Fix: the original passed the folder path `A_` as the JSON content;
    the intent (per the surviving `get_git_info()` call) is to save the
    git info dict.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(A_, '''git_log.json'''))
def snake_case_(content, path, indent=4, **json_dump_kwargs):
    """Dump ``content`` as JSON to ``path``.

    Fix: the original declared duplicate `A_` parameters (a SyntaxError);
    distinct names restored.
    """
    with open(path, '''w''') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


# canonical alias matching the call site in save_git_info
save_json = snake_case_
def snake_case_(A_: Any):
    """Read the JSON file at ``A_`` and return the parsed object."""
    with open(A_) as handle:
        parsed = json.load(handle)
    return parsed
def snake_case_():
    """Return a dict describing the current git repo (id, sha, branch, host).

    Fix: the original passed the undefined `A_` both to
    ``git.Repo(search_parent_directories=...)`` (should be True — the
    function takes no arguments) and to ``str(...)`` for the repo id.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        '''repo_id''': str(repo),
        '''repo_sha''': str(repo.head.object.hexsha),
        '''repo_branch''': str(repo.active_branch),
        '''hostname''': str(socket.gethostname()),
    }
    return repo_infos


# canonical alias matching the call site in save_git_info
get_git_info = snake_case_
def snake_case_(f: Callable, x: Iterable) -> list:
    """Eagerly apply ``f`` to every element of ``x`` (list(map(f, x))).

    Fix: both parameters were declared `A_` (a SyntaxError).
    """
    return list(map(f, x))
def snake_case_(obj, path):
    """Pickle ``obj`` to the file at ``path``.

    Fix: both parameters were declared `A_` (a SyntaxError).
    """
    with open(path, '''wb''') as f:
        return pickle.dump(obj, f)
def snake_case_(A_):
    """SQuAD-style answer normalization: lowercase, strip punctuation,
    articles and redundant whitespace.

    Fix: the inner helpers read the undefined name `text` while their
    parameter was mangled to `A_`; each helper now uses its own parameter.
    """
    def remove_articles(text):
        return re.sub(R'''\b(a|an|the)\b''', ''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(A_))))


# canonical alias matching the call sites in the scoring helpers below
normalize_answer = snake_case_
def snake_case_(prediction, ground_truth):
    """Token-level F1 between a prediction and a reference (SQuAD metric).

    Fix: both parameters were declared `A_` (a SyntaxError); distinct names
    restored. Returns 0 when the normalized token bags do not overlap.
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def snake_case_(prediction, ground_truth):
    """True when prediction and reference are equal after normalization.

    Fix: both parameters were declared `A_` (a SyntaxError).
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)


# canonical alias matching the call site in the EM aggregation below
exact_match_score = snake_case_
def snake_case_(output_lns, reference_lns):
    """Average exact-match score over paired output/reference lines.

    Fix: both parameters were declared `A_` (a SyntaxError).
    Returns {"em": <float in [0, 1]>} (0 for empty input).
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def snake_case_(A_: str) -> bool:
    """True when the model prefix denotes a RAG model.

    Fix: the body read the undefined name `model_prefix` while the parameter
    was mangled to `A_`.
    """
    return A_.startswith('''rag''')
def snake_case_(extra_params, hparams, config):
    """Move the attributes listed in ``extra_params`` from ``hparams`` onto
    ``config``, mapping T5's ``dropout`` -> ``dropout_rate`` when needed.

    Fix: all three parameters were declared `A_` (a SyntaxError); names
    restored from the surviving body references (`extra_params`, `p`,
    `equivalent_param`, `configs_no_init` pattern).

    Returns the (hparams, config) pair after the transfer.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 83 | 1 |
"""simple docstring"""
def snake_case_(A_: dict) -> set:
    """Approximate minimum vertex cover via maximal matching: repeatedly take
    an uncovered edge, add BOTH endpoints to the cover and drop every edge
    they touch (2-approximation).

    Fixes vs. the obfuscated original: the popped edge was unpacked into one
    name, the (unhashable) graph dict itself was added to the cover, and the
    loop discarded the graph instead of the adjacent edge.
    """
    chosen_vertices = set()
    # edges = set of the graph's directed edge tuples
    edges = get_edges(A_)
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        # remove all arcs adjacent to from_node and to_node
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(A_: dict) -> set:
    """Return the set of (from_node, to_node) tuples of an adjacency-list graph."""
    edges = set()
    for from_node, to_nodes in A_.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 83 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): the dump reuses `lowerCAmelCase__` for both the logger above and
# the checkpoint->config-URL map below, so the second binding clobbers the first.
lowerCAmelCase__ = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class __snake_case(_lowercase):
    """Configuration class for CamemBERT models (RoBERTa-style hyper-parameters).

    Fix: the obfuscated `__init__` declared every parameter as
    `__lowerCAmelCase` (a SyntaxError) and dropped the `self.` assignment
    targets; names restored from the default values and the assignment order.
    """

    snake_case__ = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case(_lowercase):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def SCREAMING_SNAKE_CASE(self):
        """Map each ONNX input name to its dynamic-axis specification."""
        if self.task == "multiple-choice":
            axis_spec = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            axis_spec = {0: '''batch''', 1: '''sequence'''}
        # both inputs share the same axis specification object
        return OrderedDict([('''input_ids''', axis_spec), ('''attention_mask''', axis_spec)])
| 83 | 1 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , _lowercase , )
class __snake_case ( _lowercase):
    """DeeBERT-style RoBERTa backbone with early-exit highways.

    NOTE(review): obfuscation artifacts — the two class attributes below share
    the name `snake_case__` (second clobbers the first; presumably
    `config_class` and `base_model_prefix`), and the RobertaEmbeddings
    instance is bound to a local instead of `self.embeddings`, so as written
    the embeddings are discarded. Confirm against the canonical
    `modeling_highway_roberta.py`.
    """
    snake_case__ : Any = RobertaConfig
    snake_case__ : Optional[Any] = "roberta"
    def __init__( self : int , __lowerCAmelCase : Optional[int] ):
        """Build the base model and initialize weights from the given config."""
        super().__init__(__lowerCAmelCase )
        # NOTE(review): presumably `self.embeddings = RobertaEmbeddings(config)`
        _lowerCamelCase : Optional[int] = RobertaEmbeddings(__lowerCAmelCase )
        self.init_weights()
@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , _lowercase , )
class __snake_case ( _lowercase):
    # DeeRoBERTa sequence classifier with per-layer "highway" early-exit heads.
    # NOTE(review): throughout this class every assignment target was rewritten
    # to `_lowerCamelCase`, while later statements read the names the upstream
    # DeeBERT code used (`outputs`, `logits`, `loss`, `exit_layer`, ...). The
    # comments below document the intended flow; confirm against the original
    # implementation before executing.

    snake_case__ : int = RobertaConfig  # config class consumed by from_pretrained()
    snake_case__ : Any = "roberta"  # base-model prefix used when loading weights

    def __init__( self : List[str] , __lowerCAmelCase : List[str] ):
        """Set up the DeeRoBERTa encoder plus dropout and a linear classifier head."""
        super().__init__(__lowerCAmelCase )
        _lowerCamelCase : Tuple = config.num_labels
        _lowerCamelCase : str = config.num_hidden_layers
        # Encoder with highway exits after every layer.
        _lowerCamelCase : int = DeeRobertaModel(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
        _lowerCamelCase : Optional[int] = nn.Linear(config.hidden_size , self.config.num_labels )

    @add_start_docstrings_to_model_forward(__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Optional[Any]=-1 , __lowerCAmelCase : Optional[int]=False , ):
        """Forward pass with early-exit support.

        Runs the full encoder unless a highway head exits early (signalled by
        HighwayException). Computes the classification/regression loss for the
        final head and for each highway head; at inference time it also tracks
        per-head entropies and the layer at which the exit happened.
        """
        _lowerCamelCase : str = self.num_layers
        try:
            _lowerCamelCase : List[Any] = self.roberta(
                __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , position_ids=__lowerCAmelCase , head_mask=__lowerCAmelCase , inputs_embeds=__lowerCAmelCase , )
            # Classify on the pooled ([CLS]) representation.
            _lowerCamelCase : int = outputs[1]
            _lowerCamelCase : List[str] = self.dropout(__lowerCAmelCase )
            _lowerCamelCase : Optional[int] = self.classifier(__lowerCAmelCase )
            _lowerCamelCase : Optional[int] = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # A highway head decided to exit early; it carries the partial
            # outputs and the index of the exiting layer.
            _lowerCamelCase : Optional[int] = e.message
            _lowerCamelCase : Dict = e.exit_layer
            _lowerCamelCase : int = outputs[0]
        if not self.training:
            # Track entropies only at inference time.
            _lowerCamelCase : Tuple = entropy(__lowerCAmelCase )
            _lowerCamelCase : Optional[Any] = []
            _lowerCamelCase : Dict = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                _lowerCamelCase : List[Any] = MSELoss()
                _lowerCamelCase : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                _lowerCamelCase : Any = CrossEntropyLoss()
                _lowerCamelCase : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            _lowerCamelCase : Union[str, Any] = []
            for highway_exit in outputs[-1]:
                _lowerCamelCase : Optional[int] = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(__lowerCAmelCase )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    _lowerCamelCase : List[str] = MSELoss()
                    _lowerCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    _lowerCamelCase : Tuple = CrossEntropyLoss()
                    _lowerCamelCase : Tuple = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(__lowerCAmelCase )
            if train_highway:
                # Train only the highway heads (sum of their losses).
                _lowerCamelCase : Optional[int] = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                _lowerCamelCase : Any = (loss,) + outputs
        if not self.training:
            _lowerCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                # Caller asked for a specific highway head's logits.
                _lowerCamelCase : Optional[int] = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 83 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Markers wrapped around converted lines that need a human reviewer.
# NOTE(review): all four module constants below were renamed to the same
# identifier `lowerCAmelCase__`, so each assignment shadows the previous one;
# upstream these are HIGHLIGHT_MESSAGE_PRE / HIGHLIGHT_MESSAGE_POST /
# TO_HIGHLIGHT / TO_CONVERT (the names the class below reads) -- confirm
# before running.
lowerCAmelCase__ = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase__ = '''=======
>>>>>>>
'''
# Identifiers whose presence flags a line for manual review.
lowerCAmelCase__ = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]
# Regex rewrites applied to every line when converting a TFDS dataset script
# into a HuggingFace Datasets script.
lowerCAmelCase__ = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def snake_case_ ( A_ : Namespace ):
    """Factory for the `datasets-cli convert` subcommand.

    A_: parsed argparse namespace carrying `tfds_path` and `datasets_directory`.
    Returns a ConvertCommand built from those two paths.
    """
    # Bug fix: the body previously read an undefined global `args`; the parsed
    # namespace is the parameter `A_`.
    return ConvertCommand(A_.tfds_path, A_.datasets_directory )
class __snake_case ( _lowercase):
    # CLI command converting TensorFlow Datasets scripts into HuggingFace
    # Datasets scripts. `_lowercase` is presumably BaseDatasetsCLICommand.
    # NOTE(review): assignment targets in this class were rewritten to
    # `_lowerCamelCase` while later lines read the upstream names
    # (`train_parser`, `abs_tfds_path`, `out_line`, `output_file`, ...);
    # treat the bodies as documentation of intent until the names are restored.

    @staticmethod
    def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : ArgumentParser ):
        """Register the `convert` subcommand with its two required path options."""
        # NOTE(review): reads `parser` but the parameter is `__lowerCAmelCase`.
        _lowerCamelCase : List[str] = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=__lowerCAmelCase )

    def __init__( self : str , __lowerCAmelCase : str , __lowerCAmelCase : str , *__lowerCAmelCase : int ):
        """Remember the TFDS source path and the destination datasets directory."""
        _lowerCamelCase : List[str] = get_logger('''datasets-cli/converting''' )
        _lowerCamelCase : int = tfds_path
        _lowerCamelCase : Dict = datasets_directory

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Run the conversion: rewrite every TFDS script and lay out the output tree."""
        # Resolve the input: either a directory of scripts or a single file.
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : Union[str, Any] = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            _lowerCamelCase : Dict = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        _lowerCamelCase : int = os.path.abspath(self._datasets_directory )
        self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        _lowerCamelCase : str = []  # utility files to copy next to their builder at the end
        _lowerCamelCase : Union[str, Any] = []  # converted files that still need manual edits
        _lowerCamelCase : Union[str, Any] = {}  # import name -> output directory of its builder
        if os.path.isdir(self._tfds_path ):
            _lowerCamelCase : List[str] = os.listdir(__lowerCAmelCase )
        else:
            _lowerCamelCase : Optional[Any] = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'''Looking at file {f_name}''' )
            _lowerCamelCase : Union[str, Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            _lowerCamelCase : Optional[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
            # Only convert real .py dataset scripts; skip package/test files.
            if not os.path.isfile(__lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(__lowerCAmelCase , encoding='''utf-8''' ) as f:
                _lowerCamelCase : Tuple = f.readlines()
            _lowerCamelCase : Optional[int] = []  # rewritten lines of the current file
            _lowerCamelCase : Union[str, Any] = False  # does this file define a dataset builder?
            _lowerCamelCase : int = False  # does it need manual follow-up?
            _lowerCamelCase : Tuple = []  # TFDS-local imports seen in this file
            for line in lines:
                _lowerCamelCase : Optional[int] = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    _lowerCamelCase : Union[str, Any] = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    _lowerCamelCase : List[str] = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    _lowerCamelCase : str = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    _lowerCamelCase : Union[str, Any] = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Keep the line, but wrap it in review markers.
                    _lowerCamelCase : Dict = True
                    # NOTE(review): the lambda parameter was renamed but its
                    # body still reads `e` -- broken as written.
                    _lowerCamelCase : Optional[int] = list(filter(lambda __lowerCAmelCase : e in out_line , __lowerCAmelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowerCAmelCase ) + '''\n''' )
                    out_lines.append(__lowerCAmelCase )
                    out_lines.append(__lowerCAmelCase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        _lowerCamelCase : str = re.sub(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        _lowerCamelCase : Dict = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , __lowerCAmelCase )
                        tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                        _lowerCamelCase : Union[str, Any] = '''from . import ''' + match.group(1 )
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f'''Error converting {out_line.strip()}''' )
                    if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                        _lowerCamelCase : Any = True
                    out_lines.append(__lowerCAmelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                _lowerCamelCase : Union[str, Any] = f_name.replace('''.py''' , '''''' )
                _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
                os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
                self._logger.info(f'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(__lowerCAmelCase )
            if needs_manual_update:
                with_manual_update.append(__lowerCAmelCase )
            with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(__lowerCAmelCase )
            self._logger.info(f'''Converted in {output_file}''' )
        for utils_file in utils_files:
            try:
                # Copy each shared utility next to the builder that imports it.
                _lowerCamelCase : Optional[int] = os.path.basename(__lowerCAmelCase )
                _lowerCamelCase : Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(__lowerCAmelCase , __lowerCAmelCase )
            except KeyError:
                self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 83 | 1 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    # Convert an original Stable Diffusion checkpoint into the diffusers format.
    # Bug fixes in this block: the parser, parsed args, and converted pipeline
    # were each assigned to a throwaway name (`lowerCAmelCase__`) while every
    # later line read `parser` / `args` / `pipe`; and `torch.floataa` does not
    # exist -- half precision is `torch.float16`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        '''--original_config_file''',
        default=None,
        type=str,
        help='''The YAML config file corresponding to the original architecture.''',
    )
    parser.add_argument(
        '''--num_in_channels''',
        default=None,
        type=int,
        help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
    )
    parser.add_argument(
        '''--scheduler_type''',
        default='''pndm''',
        type=str,
        help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
    )
    parser.add_argument(
        '''--pipeline_type''',
        default=None,
        type=str,
        help=(
            '''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
            '''. If `None` pipeline will be automatically inferred.'''
        ),
    )
    parser.add_argument(
        '''--image_size''',
        default=None,
        type=int,
        help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
            ''' Base. Use 768 for Stable Diffusion v2.'''
        ),
    )
    parser.add_argument(
        '''--prediction_type''',
        default=None,
        type=str,
        help=(
            '''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
            ''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
        ),
    )
    parser.add_argument(
        '''--extract_ema''',
        action='''store_true''',
        help=(
            '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
            ''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
            ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
        ),
    )
    parser.add_argument(
        '''--upcast_attention''',
        action='''store_true''',
        help=(
            '''Whether the attention computation should always be upcasted. This is necessary when running stable'''
            ''' diffusion 2.1.'''
        ),
    )
    parser.add_argument(
        '''--from_safetensors''',
        action='''store_true''',
        help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
    )
    parser.add_argument(
        '''--to_safetensors''',
        action='''store_true''',
        help='''Whether to store pipeline in safetensors format or not.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    parser.add_argument(
        '''--stable_unclip''',
        type=str,
        default=None,
        required=False,
        help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
    )
    parser.add_argument(
        '''--stable_unclip_prior''',
        type=str,
        default=None,
        required=False,
        help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
    )
    parser.add_argument(
        '''--clip_stats_path''',
        type=str,
        help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
        required=False,
    )
    parser.add_argument(
        '''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
    )
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--vae_path''',
        type=str,
        default=None,
        required=False,
        help='''Set to a path, hub id to an already converted vae to not convert it again.''',
    )
    args = parser.parse_args()

    # Build the diffusers pipeline from the original-format checkpoint.
    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )
    if args.half:
        # Half precision on request (fix: was the nonexistent torch.floataa).
        pipe.to(torch_dtype=torch.float16)
    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
"""simple docstring"""
def snake_case_ ( values : list, weights : list, number_of_items : int, max_weight : int, index : int ):
    """Recursive 0/1 knapsack.

    Returns the maximum total value achievable from items[index:] whose
    combined weight does not exceed `max_weight` (each item used at most once).

    values: item values; weights: item weights (parallel lists).
    number_of_items: length of both lists; index: first item to consider.

    Bug fixes: the original signature declared five parameters all named `A_`
    (a SyntaxError), the recursion targeted an undefined name `knapsack`, and
    the final `max(...)` compared one name with itself. Callers pass arguments
    positionally, so restoring distinct parameter names is backward-compatible.
    """
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans_without = snake_case_(values, weights, number_of_items, max_weight, index + 1 )
    # Option 2: take the current item, if it fits in the remaining capacity.
    ans_with = 0
    if weights[index] <= max_weight:
        ans_with = values[index] + snake_case_(
            values, weights, number_of_items, max_weight - weights[index], index + 1 )
    return max(ans_without, ans_with )
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 83 | 1 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __snake_case ( _lowercase):
    """Iterable dataset of *unknown* length for exercising IterableDatasetShard.

    Yields consecutive integers 0, 1, 2, ... and stops either after
    `max_length` items or, randomly, with probability `p_stop` after each item.

    Bug fixes: both __init__ parameters were declared with the same name
    (a SyntaxError) and were bound to a throwaway local instead of the
    attributes `__iter__` reads (`self.p_stop`, `self.max_length`). The
    keyword call `RandomIterableDataset(max_length=2)` later in this file
    confirms the parameter names.
    """

    def __init__( self : Tuple , p_stop : float = 0.01 , max_length : int = 1_0_0_0 ):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__( self : Optional[int] ):
        """Yield 0, 1, 2, ... until a random stop fires or max_length is hit."""
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            # Decide after each yield whether to stop early.
            stop = random.random() < self.p_stop
class __snake_case ( unittest.TestCase):
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int=False , __lowerCAmelCase : Dict=True ):
        """Shard a batch sampler across 2 processes and compare against `expected`.

        NOTE(review): all four parameters were renamed to the same identifier
        (upstream: batch_sampler, expected, split_batches, even_batches), which
        is not valid Python -- restore distinct names before running.
        """
        _lowerCamelCase : Optional[Any] = [
            BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
            for i in range(2 )
        ]
        _lowerCamelCase : List[str] = [list(__lowerCAmelCase ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Without split_batches, each shard's reported length must match the
            # number of expected batches for that shard.
            self.assertListEqual([len(__lowerCAmelCase ) for shard in batch_sampler_shards] , [len(__lowerCAmelCase ) for e in expected] )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        """BatchSamplerShard sharding without split_batches (even_batches default).

        Covers: dataset length a multiple of the total batch size, a multiple of
        the batch size only, no multiple at all, and a tiny dataset -- each with
        drop_last on and off.
        """
        _lowerCamelCase : Dict = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : List[str] = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        _lowerCamelCase : str = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Dict = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : Tuple = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : str = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        _lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : int = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : Dict = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Any = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        _lowerCamelCase : Tuple = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Dict = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : str = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : str = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is very small.
        _lowerCamelCase : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        _lowerCamelCase : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Tuple ):
        """BatchSamplerShard sharding with split_batches=True.

        Each global batch of 4 is split into two half-batches of 2, one per
        process; covers round/odd dataset sizes and a tiny dataset, with
        drop_last on and off.
        """
        _lowerCamelCase : Tuple = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : List[str] = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        _lowerCamelCase : List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size.
        _lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Any = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        _lowerCamelCase : List[str] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        _lowerCamelCase : Union[str, Any] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        _lowerCamelCase : Dict = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Tuple = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        # Check the shards when the dataset is very small.
        _lowerCamelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        _lowerCamelCase : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Dict = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """BatchSamplerShard sharding with even_batches=False (no padding).

        Shards may end up with different numbers of batches instead of looping
        back to the start of the dataset; covers the same size/drop_last matrix
        as the even-batches test.
        """
        _lowerCamelCase : int = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : int = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        _lowerCamelCase : Tuple = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Any = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        _lowerCamelCase : int = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Dict = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        _lowerCamelCase : Union[str, Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : List[str] = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : str = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        _lowerCamelCase : List[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : int = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        _lowerCamelCase : Any = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = [
            [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
            [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is very small.
        _lowerCamelCase : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Any = [[[0, 1]], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        _lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Tuple = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """BatchSamplerShard with split_batches=True AND even_batches=False.

        Global batches of 4 are split into per-process half-batches of 2 with
        no padding of the last, possibly short, batch.
        """
        _lowerCamelCase : Optional[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        _lowerCamelCase : List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size.
        _lowerCamelCase : List[str] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : str = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        _lowerCamelCase : Dict = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        _lowerCamelCase : str = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        _lowerCamelCase : Any = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : List[str] = [
            [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
            [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is very small.
        _lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : List[Any] = [[[0, 1]], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        _lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        _lowerCamelCase : List[str] = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Sharding a sampler whose batches have varying sizes (even_batches off).

        With 5 uneven batches over 2 shards, shard 0 gets 3 batches and shard 1
        gets 2, alternating in order.
        """
        _lowerCamelCase : Optional[int] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
        _lowerCamelCase : Any = [BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , even_batches=__lowerCAmelCase ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
    def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=False ):
        """Shard an iterable dataset across processes and verify the elements.

        Checks every shard has the same length (a multiple of the per-shard
        batch size) and that interleaving shard contents reproduces the
        reference sequence (looped when drop_last is off).

        NOTE(review): parameters share one obfuscated name (upstream:
        dataset, seed, batch_size, drop_last, num_processes, split_batches) --
        not valid Python as written.
        """
        random.seed(__lowerCAmelCase )
        _lowerCamelCase : int = list(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = [
            IterableDatasetShard(
                __lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=__lowerCAmelCase , num_processes=__lowerCAmelCase , process_index=__lowerCAmelCase , split_batches=__lowerCAmelCase , )
            for i in range(__lowerCAmelCase )
        ]
        _lowerCamelCase : Union[str, Any] = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(__lowerCAmelCase )
            iterable_dataset_lists.append(list(__lowerCAmelCase ) )
        _lowerCamelCase : Dict = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        _lowerCamelCase : Union[str, Any] = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
            self.assertTrue(len(__lowerCAmelCase ) % shard_batch_size == 0 )
        _lowerCamelCase : List[str] = []
        for idx in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            # The reference loops back on itself to fill the last batches.
            while len(__lowerCAmelCase ) < len(__lowerCAmelCase ):
                reference += reference
        self.assertListEqual(__lowerCAmelCase , reference[: len(__lowerCAmelCase )] )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 4_2
_lowerCamelCase : Any = RandomIterableDataset()
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
# Edge case with a very small dataset
_lowerCamelCase : Optional[Any] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = SkipBatchSampler(__lowerCAmelCase , 2 )
self.assertListEqual(list(__lowerCAmelCase ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Dict = DataLoader(list(range(1_6 ) ) , batch_size=4 )
_lowerCamelCase : str = skip_first_batches(__lowerCAmelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
Accelerator()
_lowerCamelCase : Any = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 83 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    """Dummy placeholder raising a helpful ImportError when `transformers`, `torch`
    or `note_seq` is not installed.

    NOTE(review): the dump used an undefined metaclass `_lowercase` (while the
    imported `DummyObject` went unused), lost the `_backends` attribute the
    metaclass relies on, and gave both classmethods the same colliding name;
    names restored from diffusers' dummy-objects convention.
    """

    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 83 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure: maps submodule name -> list of public names it provides.
# (Restored: the dump rebound a single `lowerCAmelCase__` name repeatedly and then
# referenced a never-defined `_import_structure`, so importing raised NameError.)
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 83 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MTaIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check the summed NLL of Flax mT5-small on a tiny pair against a reference score.

        NOTE(review): local names restored — the dump bound every result to
        `_lowerCamelCase` while later reading `logits`/`labels`/`loss`/`mtf_score`,
        which raised NameError at runtime.
        """
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        # Teacher forcing: decoder inputs are the labels shifted right.
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        # Negative total log-likelihood, comparable to the original TF-Mesh score.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 83 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the "u" product of Newton's
    forward-difference interpolation formula.

    >>> ucal(5, 3)
    60
    >>> ucal(1, 2)
    0
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """Interactively read sample points and interpolate a value using Newton's
    forward-difference formula.

    NOTE(review): restored from the obfuscated dump, which had duplicate `A_`
    parameters (SyntaxError) and referenced undefined names (`ucal`, `temp`,
    `summ`, ...).
    """
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    # Seed the difference table; column 0 is overwritten by user input below.
    for i in range(n):
        for j in range(n):
            y[i].append(j)

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # Build the forward difference table column by column.
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
| 83 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict.from_directory reads the dataset size from either the
    README.md YAML header or the legacy dataset_infos.json (names restored from
    the dump's duplicate `A_` parameters)."""
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    """Round-trip a DatasetInfo through write_to_directory/from_directory
    (names restored from the dump's broken `A_`/`_lowerCamelCase` bindings)."""
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    """`_to_yaml_dict` keeps exactly the whitelisted keys, with YAML-serializable
    values, and round-trips through yaml dump/load."""
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # some fields are not converted to string, e.g. dataset_size
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    """An all-default DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    """Round-trip a DatasetInfosDict through write_to_directory/from_directory."""
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 83 | 1 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted halves input_list[low:mid] and input_list[mid:high+1]
    back into input_list in place, and return it.

    NOTE(review): the dump assigned the merged result to a throwaway local
    instead of writing it back into ``input_list[low : high + 1]``, so merging
    was a no-op; the slice write-back is restored here.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # Pop from whichever half has the smaller head (stable: ties take left).
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of `input_list` using bottom-up (iterative) merge sort.

    >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7])
    [1, 2, 5, 7, 7, 8, 9]
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 83 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    """Builds small random ASTConfig/inputs for the common model tests.

    NOTE(review): restored from the dump, whose ``__init__`` used duplicate
    ``__lowerCAmelCase`` parameters (a SyntaxError) and whose four methods all
    shared one colliding name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for the Audio Spectrogram Transformer.

    Attention and feed-forward here follow the standard ViT layout, so only the
    input-specific pieces (spectrogram inputs, no input_ids/inputs_embeds) are
    overridden.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # The audio-classification pipeline tests need a real feature extractor checkpoint.
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    """Download a short sample flac from the Hub and load it with torchaudio."""
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 83 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.