"""Benchmark utilities: a timing decorator and random dummy-dataset generation."""
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Wrap `func` so that calling it returns its wall-clock duration instead of its result."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Generate `num_examples` random examples matching the given `datasets` features."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write the generated examples to an Arrow file and load them back as a `datasets.Dataset`."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if num_final_examples != num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
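

# Usage sketch (added for illustration, not part of the original file): it assumes the
# de-obfuscated names above (`get_duration`, `generate_example_dataset`) and writes a
# small dummy dataset to a temporary Arrow file, then times reading one column.
if __name__ == "__main__":
    import tempfile

    features = datasets.Features(
        {"text": datasets.Value("string"), "scores": datasets.Sequence(datasets.Value("float32"))}
    )

    @get_duration
    def read_column(dataset):
        _ = dataset["scores"]

    with tempfile.TemporaryDirectory() as tmp_dir:
        ds = generate_example_dataset(
            tmp_dir + "/dummy.arrow", features, num_examples=50, seq_shapes={"scores": (10,)}
        )
        print(f"read_column took {read_column(ds):.4f}s")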
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
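
# Example invocation (illustrative sketch only; the script file name and the local
# checkpoint path below are hypothetical — the script calls torch.load, so it expects
# a checkpoint that has already been downloaded to disk):
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url ./dpt_hybrid-midas.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid-midas \
#       --show_prediction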
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
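

# Usage sketch (added for illustration; it assumes the de-obfuscated class names
# above): build a composite OWL-ViT config from explicit sub-config dicts.
if __name__ == "__main__":
    text_config = {"vocab_size": 49408, "hidden_size": 512}
    vision_config = {"hidden_size": 768, "patch_size": 32}
    config = OwlViTConfig.from_text_vision_configs(text_config, vision_config)
    print(config.text_config.hidden_size, config.vision_config.patch_size)  # 512 32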
"""simple docstring"""
def depth_first_search(grid, row, col, visit):
    """Count the paths from the top-left to the bottom-right cell of `grid`,
    moving up/down/left/right and never stepping on a blocked cell (value 1)."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
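
# Quick demo (added for illustration): count paths on a 3x3 grid with the center
# blocked. The backtracking enumerates simple paths, so expect exponential time
# on large grids.
#
#   grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#   print(depth_first_search(grid, 0, 0, set()))  # 2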
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowercase__ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCamelCase_ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCamelCase_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase_ ) , [5, 6, 2, 5, 7, 8] )
def lowerCamelCase_ ( self: Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase_ ( self: Dict ) -> Tuple:
"""simple docstring"""
lowercase__ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def lowerCamelCase_ ( self: List[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
lowercase__ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
lowercase__ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase_ ( self: Tuple ) -> str:
"""simple docstring"""
lowercase__ = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase_ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def lowerCamelCase_ ( self: Tuple ) -> Dict:
"""simple docstring"""
lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowercase__ = {}
for i, token in enumerate(UpperCamelCase_ ):
lowercase__ = i
lowercase__ = RoCBertWordpieceTokenizer(vocab=UpperCamelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def lowerCamelCase_ ( self: Dict ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def lowerCamelCase_ ( self: Optional[int] ) -> int:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowercase__ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCamelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def lowerCamelCase_ ( self: List[str] ) -> List[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowercase__ = tokenizer_r.encode_plus(
UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , )
lowercase__ = tokenizer_r.do_lower_case if hasattr(UpperCamelCase_ , '''do_lower_case''' ) else False
lowercase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ['''的''', '''人''', '''有''']
lowercase__ = ''''''.join(UpperCamelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase__ = True
lowercase__ = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = tokenizer_p.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer_r.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer_r.convert_ids_to_tokens(UpperCamelCase_ )
lowercase__ = tokenizer_p.convert_ids_to_tokens(UpperCamelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = False
lowercase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = tokenizer_r.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer_p.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer_r.convert_ids_to_tokens(UpperCamelCase_ )
lowercase__ = tokenizer_p.convert_ids_to_tokens(UpperCamelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowercase__ = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(UpperCamelCase_ )
]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowercase__ = tokenizer.encode('''你好''' , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.get_tokenizers(do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowercase__ = '''你好,你是谁'''
lowercase__ = tokenizer.tokenize(UpperCamelCase_ )
lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
lowercase__ = tokenizer.convert_tokens_to_shape_ids(UpperCamelCase_ )
lowercase__ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase_ )
lowercase__ = tokenizer.prepare_for_model(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase__ = tokenizer.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
from ..utils import DummyObject, requires_backends


# The original class name was stripped from this dump; `_a` is kept as a placeholder.
class _a(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
def binary_insertion_sort(collection):
    """Sort `collection` in place using insertion sort with a binary search
    to locate each element's insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
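
# Worked example (added for illustration): the binary search cuts comparisons to
# O(n log n), though element shifts keep the overall worst case at O(n^2).
#
#   >>> binary_insertion_sort([5, 2, 4, 1, 3])
#   [1, 2, 3, 4, 5]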
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
def _modexpt(base, exponent, modulo_value):
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base=1777, height=1855, digits=8):
    """Project Euler 188: return the last `digits` digits of the hyperexponentiation
    (tetration) of `base` to the given `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
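
# Sanity check (added for illustration): for small arguments _modexpt agrees with
# Python's built-in three-argument pow().
#
#   >>> _modexpt(3, 7, 1000) == pow(3, 7, 1000)
#   True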
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
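
# Usage sketch (added for illustration; it assumes the de-obfuscated class names above,
# and "path/to/finetuned-pabee" is a hypothetical checkpoint directory):
#
#   model = BertForSequenceClassificationWithPabee.from_pretrained("path/to/finetuned-pabee")
#   model.bert.set_patience(3)   # exit early once 3 consecutive layers agree
#   model.bert.reset_stats()
#   logits = model(input_ids=input_ids)[0]
#   model.bert.log_stats()       # report the average number of layers actually run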
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # the mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A : List[Any] = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
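
# Editor note (illustrative): unlike BERT, XLNet puts <sep> and <cls> at the END of
# the sequence, which is what build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences above encode (segment id 2 for <cls>).
# A usage sketch, assuming the published checkpoint is available:
#
#     from transformers import XLNetTokenizerFast
#     tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     enc = tok("Hello world")
#     print(enc["input_ids"], enc["token_type_ids"])  # token_type_ids ends with 2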
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
"""tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
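
# Editor note: `_LazyModule` defers the heavy torch/TF imports until an attribute is
# first accessed. A minimal standalone sketch of the idea (assumed names, standard
# library only), not the actual transformers implementation:
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#             return getattr(module, attr)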
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
snake_case_ = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
snake_case_ = f'''https://www.google.com/search?q={query}&num=100'''
snake_case_ = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
snake_case_ = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
snake_case_ = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve over a set of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
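
    # Editor-added sketch: the Bernstein basis computed by basis_function is a
    # partition of unity, so the weights at any t sum to 1, and
    # bezier_curve_function evaluates a point on the curve without plotting.
    curve = BezierCurve([(0, 0), (5, 5), (5, 0)])
    print(sum(curve.basis_function(0.3)))  # -> 1.0
    print(curve.bezier_curve_function(0.5))  # point on the degree-2 curve at t = 0.5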
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Optional[int]:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , a__=True ) ->List[str]:
'''simple docstring'''
model.train()
_UpperCamelCase = model(a__ )
_UpperCamelCase = F.mse_loss(a__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(a__ )
def lowerCAmelCase__ ( a__ , a__=False ) ->Union[str, Any]:
'''simple docstring'''
set_seed(42 )
_UpperCamelCase = RegressionModel()
_UpperCamelCase = deepcopy(a__ )
_UpperCamelCase = RegressionDataset(length=80 )
_UpperCamelCase = DataLoader(a__ , batch_size=16 )
model.to(accelerator.device )
if sched:
_UpperCamelCase = AdamW(params=model.parameters() , lr=1e-3 )
_UpperCamelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 )
_UpperCamelCase = LambdaLR(a__ , lr_lambda=lambda a__ : epoch**0.65 )
_UpperCamelCase = LambdaLR(a__ , lr_lambda=lambda a__ : epoch**0.65 )
# Make a copy of `model`
if sched:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = accelerator.prepare(a__ , a__ , a__ , a__ )
else:
_UpperCamelCase , _UpperCamelCase = accelerator.prepare(a__ , a__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCAmelCase__ ( a__ ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = get_training_setup(a__ )
# Use a single batch
_UpperCamelCase , _UpperCamelCase = next(iter(a__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_UpperCamelCase , _UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
_UpperCamelCase , _UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(a__ , a__ , a__ , a__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(a__ ):
step_model(a__ , a__ , a__ , a__ )
else:
# Sync grads
step_model(a__ , a__ , a__ , a__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(a__ , a__ , a__ , a__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
_UpperCamelCase = ddp_input[torch.randperm(len(a__ ) )]
def lowerCAmelCase__ ( a__ ) ->str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = get_training_setup(a__ )
# Use a single batch
_UpperCamelCase , _UpperCamelCase = next(iter(a__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_UpperCamelCase , _UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
_UpperCamelCase , _UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(a__ , a__ , a__ , a__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(a__ ):
step_model(a__ , a__ , a__ , a__ )
else:
# Sync grads
step_model(a__ , a__ , a__ , a__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
_UpperCamelCase = ddp_input[torch.randperm(len(a__ ) )]
def lowerCAmelCase__ ( a__=False , a__=False ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase = Accelerator(
split_batches=a__ , dispatch_batches=a__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = get_training_setup(a__ )
for iteration, batch in enumerate(a__ ):
_UpperCamelCase , _UpperCamelCase = batch.values()
# Gather the distributed inputs and targs for the base model
_UpperCamelCase , _UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
_UpperCamelCase , _UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(a__ , a__ , a__ , a__ , a__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(a__ ):
step_model(a__ , a__ , a__ , a__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(a__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
_UpperCamelCase = ddp_input[torch.randperm(len(a__ ) )]
GradientState._reset_state()
def lowerCAmelCase__ ( a__=False , a__=False ) ->Dict:
'''simple docstring'''
_UpperCamelCase = Accelerator(
split_batches=a__ , dispatch_batches=a__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = get_training_setup(a__ , a__ )
for iteration, batch in enumerate(a__ ):
_UpperCamelCase , _UpperCamelCase = batch.values()
# Gather the distributed inputs and targs for the base model
_UpperCamelCase , _UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
_UpperCamelCase , _UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(a__ , a__ , a__ , a__ , a__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(a__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(a__ ):
step_model(a__ , a__ , a__ , a__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
_UpperCamelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(a__ ))
if accelerator.num_processes > 1:
check_model_parameters(a__ , a__ , a__ , a__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def lowerCAmelCase__ ( ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = Accelerator()
_UpperCamelCase = RegressionDataset(length=80 )
_UpperCamelCase = DataLoader(a__ , batch_size=16 )
_UpperCamelCase = RegressionDataset(length=96 )
_UpperCamelCase = DataLoader(a__ , batch_size=16 )
_UpperCamelCase , _UpperCamelCase = accelerator.prepare(a__ , a__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(a__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(a__ )
if iteration < len(a__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(a__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(a__ )
if batch_num < len(a__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase__ ( ) ->int:
'''simple docstring'''
_UpperCamelCase = Accelerator()
_UpperCamelCase = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(a__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(a__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(a__ , a__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(a__ , a__ )
def lowerCAmelCase__ ( a__ ) ->Tuple:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
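
# Editor note: the user-facing pattern these tests exercise is the
# `Accelerator.accumulate` context manager. A minimal training-loop sketch
# (hypothetical model/optimizer/dataloader, built as in get_training_setup above):
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model = RegressionModel()
#     opt = AdamW(model.parameters(), lr=1e-3)
#     dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
#     model, opt, dataloader = accelerator.prepare(model, opt, dataloader)
#     for batch in dataloader:
#         with accelerator.accumulate(model):
#             loss = F.mse_loss(model(batch["x"]), batch["y"])
#             accelerator.backward(loss)
#             opt.step()
#             opt.zero_grad()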
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Modified Euler (Heun) method: predictor step, then trapezoidal corrector."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
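
    # Editor-added usage sketch (not in the original file): integrate dy/dx = y
    # from x = 0 to x = 1 with the predictor-corrector step above; the exact
    # answer is e ~ 2.71828.
    ys = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
    print(f"y(1) ~= {ys[-1]:.5f} (exact: {np.e:.5f})")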
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ) -> Dict:
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(__lowerCamelCase , start=1 ):
_snake_case = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : str ) -> Dict:
_snake_case = request.getfixturevalue(__lowerCamelCase )
_snake_case = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[Any] ) -> Tuple:
_snake_case = request.getfixturevalue(__lowerCamelCase )
_snake_case = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> List[Any]:
_snake_case = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__lowerCamelCase ) , start=1 ):
assert os.path.basename(__lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
import collections
import os
import re
from pathlib import Path
A: Optional[int] = "src/transformers"
# Matches is_xxx_available()
A: int = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
A: int = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
A: List[str] = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
A: Optional[Any] = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
A: Dict = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
A: Optional[int] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
A: Dict = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
A: Union[str, Any] = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
A: Union[str, Any] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
A: Optional[Any] = re.compile(R"^\s*try:")
# Catches a line with else:
A: int = re.compile(R"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse (per backend) the `_import_structure` and TYPE_CHECKING objects defined."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
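
# Editor note: a quick illustration of what find_backend extracts from an init line
# (hypothetical inputs):
#
#     find_backend("    if not is_torch_available():")   # -> "torch"
#     find_backend("_import_structure = {")              # -> None (not a backend guard)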
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Mark the function with the key code so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
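
# Editor note: a minimal usage sketch (hypothetical class). Methods decorated with
# @mark are collected into `key_handler` by the metaclass, and handle_input
# dispatches on the character read from stdin:
#
#     @register
#     class Menu:
#         @mark("j")
#         def move_down(cls):
#             return "down"
#
#     # pressing "j" makes Menu.handle_input(Menu) call move_down and return "down"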
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def lowerCAmelCase_ ( snake_case_ : int ) -> int:
'''simple docstring'''
UpperCAmelCase_ = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCAmelCase_ = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
UpperCAmelCase_ = 4
UpperCAmelCase_ = 48
UpperCAmelCase_ = "pixelshuffle_aux"
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCAmelCase_ = [6, 6, 6, 6]
UpperCAmelCase_ = 60
UpperCAmelCase_ = [6, 6, 6, 6]
UpperCAmelCase_ = "pixelshuffledirect"
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCAmelCase_ = 4
UpperCAmelCase_ = "nearest+conv"
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
UpperCAmelCase_ = 1
UpperCAmelCase_ = 1
UpperCAmelCase_ = 1_26
UpperCAmelCase_ = 7
UpperCAmelCase_ = 255.0
UpperCAmelCase_ = ""
return config
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
UpperCAmelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
UpperCAmelCase_ = name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" )
if "layers" in name:
UpperCAmelCase_ = name.replace("layers" , "encoder.stages" )
if "residual_group.blocks" in name:
UpperCAmelCase_ = name.replace("residual_group.blocks" , "layers" )
if "attn.proj" in name:
UpperCAmelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
UpperCAmelCase_ = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
UpperCAmelCase_ = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
UpperCAmelCase_ = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
UpperCAmelCase_ = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if "patch_embed.proj" in name:
UpperCAmelCase_ = name.replace("patch_embed.proj" , "patch_embed.projection" )
if name == "norm.weight":
UpperCAmelCase_ = "layernorm.weight"
if name == "norm.bias":
UpperCAmelCase_ = "layernorm.bias"
if "conv_first" in name:
UpperCAmelCase_ = name.replace("conv_first" , "first_convolution" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
UpperCAmelCase_ = name.replace("conv_last" , "final_convolution" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
UpperCAmelCase_ = name.replace("conv_before_upsample.0" , "conv_before_upsample" )
if "upsample.0" in name:
UpperCAmelCase_ = name.replace("upsample.0" , "upsample.convolution_0" )
if "upsample.2" in name:
UpperCAmelCase_ = name.replace("upsample.2" , "upsample.convolution_1" )
UpperCAmelCase_ = "upsample." + name
elif config.upsampler == "pixelshuffledirect":
UpperCAmelCase_ = name.replace("upsample.0.weight" , "upsample.conv.weight" )
UpperCAmelCase_ = name.replace("upsample.0.bias" , "upsample.conv.bias" )
else:
pass
else:
UpperCAmelCase_ = "swin2sr." + name
return name
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Optional[Any] ) -> Any:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(snake_case_ )
if "qkv" in key:
UpperCAmelCase_ = key.split("." )
UpperCAmelCase_ = int(key_split[1] )
UpperCAmelCase_ = int(key_split[4] )
UpperCAmelCase_ = config.embed_dim
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
pass
else:
UpperCAmelCase_ = val
return orig_state_dict
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Dict , snake_case_ : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = get_config(snake_case_ )
UpperCAmelCase_ = SwinaSRForImageSuperResolution(snake_case_ )
model.eval()
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(snake_case_ , map_location="cpu" )
UpperCAmelCase_ = convert_state_dict(snake_case_ , snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(snake_case_ , strict=snake_case_ )
if len(snake_case_ ) > 0:
raise ValueError("Missing keys when converting: {}".format(snake_case_ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"""Unexpected key {key} in state_dict""" )
# verify values
UpperCAmelCase_ = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" )
UpperCAmelCase_ = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
UpperCAmelCase_ = 1_26 if "Jpeg" in checkpoint_url else 2_56
UpperCAmelCase_ = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
UpperCAmelCase_ = transforms(snake_case_ ).unsqueeze(0 )
if config.num_channels == 1:
UpperCAmelCase_ = pixel_values[:, 0, :, :].unsqueeze(1 )
UpperCAmelCase_ = model(snake_case_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
UpperCAmelCase_ = torch.Size([1, 3, 5_12, 5_12] )
UpperCAmelCase_ = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCAmelCase_ = torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase_ = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
UpperCAmelCase_ = torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase_ = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCAmelCase_ = torch.Size([1, 3, 5_12, 5_12] )
UpperCAmelCase_ = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCAmelCase_ = torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase_ = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case_ , atol=1E-3 )
print("Looks ok!" )
UpperCAmelCase_ = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
UpperCAmelCase_ = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(snake_case_ )
if push_to_hub:
model.push_to_hub(f"""caidas/{model_name}""" )
processor.push_to_hub(f"""caidas/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
SCREAMING_SNAKE_CASE_: List[Any] =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
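
# Editor note: the qkv handling in convert_state_dict splits one fused attention
# projection into separate query/key/value tensors. A standalone sketch of that
# slicing with hypothetical sizes:
#
#     import torch
#     dim = 60
#     qkv_weight = torch.randn(3 * dim, dim)  # fused [q; k; v] projection
#     q = qkv_weight[:dim, :]
#     k = qkv_weight[dim : dim * 2, :]
#     v = qkv_weight[-dim:, :]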
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        pixel_values = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = pixel_values

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
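
# Editor note: typical usage sketch (assumes the published checkpoint, a PIL image,
# and Tesseract installed since apply_ocr defaults to True):
#
#     from transformers import LayoutLMv3Processor
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     encoding = processor(image, return_tensors="pt")
#     # -> input_ids, bbox, attention_mask, pixel_values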
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip'] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blip'] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_deberta_fast'''] = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deberta'''] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deberta'''] = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 106 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the URL of a dataset file on the Hub, url-encoding the path for old huggingface_hub versions."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
| 72 | 0 |
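# A short usage sketch for the helper above (repo and file names are
# illustrative; on huggingface_hub >= 0.11.0 the library url-encodes the
# path itself, so the quote() branch is skipped).
url = hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet", revision="main")
print(url)
# e.g. https://huggingface.co/datasets/squad/resolve/main/plain_text/train-00000-of-00001.parquet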
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("""fixtures""")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json'
        )
    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')

        config = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor'
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-image-processor')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-image-processor')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('test-image-processor', use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-image-processor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='test-image-processor', push_to_hub=True, use_auth_token=self._token
            )
            new_image_processor = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-image-processor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-image-processor-org', push_to_hub=True, use_auth_token=self._token
            )
            new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org')
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            F"""{USER}/test-dynamic-image-processor""", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor')
| 363 |
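# A minimal sketch of the push/reload round trip the staging tests above
# exercise (assumes a write token is configured via HfFolder or HF_TOKEN;
# the target repo id is illustrative).
from transformers import ViTImageProcessor

image_processor = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
image_processor.push_to_hub('my-user/test-image-processor')  # hypothetical repo id
reloaded = ViTImageProcessor.from_pretrained('my-user/test-image-processor')
assert image_processor.to_dict() == reloaded.to_dict()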
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc):
    """Collapse duplicate entries in one modality section of the model toc and sort it by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.')
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    """Check (and optionally fix) that the model part of the doc toc is duplicate-free and sorted."""
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 5 | 0 |
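# A tiny worked example of clean_model_doc_toc above (data is illustrative):
# the duplicate 'bert' entry collapses to one, and the result is title-sorted.
toc = [
    {'local': 'model_doc/bert', 'title': 'BERT'},
    {'local': 'model_doc/bert', 'title': 'BERT'},
    {'local': 'model_doc/albert', 'title': 'ALBERT'},
]
print(clean_model_doc_toc(toc))
# [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]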
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3 | 286 |
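# A minimal sketch of driving DDIMParallelScheduler outside the test harness
# above; random tensors stand in for a real UNet's noise prediction, and the
# sample shape is illustrative.
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample, eta=0.0).prev_sample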
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 78 | 0 |
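# Quick numeric checks of the escape-time helper above: points inside the
# Mandelbrot set never diverge (distance 1.0), while far-away points escape
# on the first iteration (distance 0.0).
print(get_distance(0.0, 0.0, 50))  # 1.0 -- the origin is inside the set
print(get_distance(2.0, 2.0, 50))  # 0.0 -- diverges immediately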
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('Enter the last number (nth term) of the P-Series'))
    power = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 136 |
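# Expected behavior of p_series above for a small input (pure arithmetic):
print(p_series(5, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']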
'''simple docstring'''
from __future__ import annotations
def all_unique(input_list: list[int]) -> bool:
    """Return True if every element of the list appears exactly once."""
    return len(set(input_list)) == len(input_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136 | 1 |
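# Quick checks for the uniqueness helper above.
print(all_unique([1, 2, 3]))     # True
print(all_unique([1, 2, 2, 3]))  # False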
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
a_ = logging.getLogger(__name__)
class UpperCAmelCase_ ( _lowercase ):
UpperCamelCase ='''summarization'''
UpperCamelCase =['''loss''']
UpperCamelCase =ROUGE_KEYS
UpperCamelCase ='''rouge2'''
def __init__( self , UpperCamelCase_ , **UpperCamelCase_ ) -> int:
if hparams.sortish_sampler and hparams.gpus > 1:
__lowercase : int = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , mode=self.mode , **_SCREAMING_SNAKE_CASE )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
__lowercase : List[Any] = Path(self.output_dir ) / '''metrics.json'''
__lowercase : int = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
__lowercase : Tuple = 0
__lowercase : List[Any] = defaultdict(_SCREAMING_SNAKE_CASE )
__lowercase : Any = self.config.model_type
__lowercase : Any = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
__lowercase : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__lowercase : List[Any] = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
__lowercase : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__lowercase : Tuple = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__lowercase : List[str] = get_git_info()['''repo_sha''']
__lowercase : int = hparams.num_workers
__lowercase : Optional[int] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE ):
__lowercase : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__lowercase : List[Any] = self.decoder_start_token_id
__lowercase : int = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
__lowercase : Union[str, Any] = False
__lowercase : List[str] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__lowercase : int = self.hparams.eval_max_gen_length
else:
__lowercase : Tuple = self.model.config.max_length
__lowercase : List[str] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict[str, List[str]]:
__lowercase : Any = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_SCREAMING_SNAKE_CASE , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
__lowercase : Union[str, Any] = True
return readable_batch
def _lowerCamelCase ( self , UpperCamelCase_ , **UpperCamelCase_ ) -> Tuple:
return self.model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple:
__lowercase : int = self.tokenizer.batch_decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
return lmap(str.strip , _SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple:
__lowercase : Any = self.tokenizer.pad_token_id
__lowercase : int = batch['''input_ids'''], batch['''attention_mask''']
__lowercase : Optional[Any] = batch['''labels''']
if isinstance(self.model , _SCREAMING_SNAKE_CASE ):
__lowercase : List[str] = self.model._shift_right(_SCREAMING_SNAKE_CASE )
else:
__lowercase : Any = shift_tokens_right(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__lowercase : Any = decoder_input_ids
self.save_readable_batch(_SCREAMING_SNAKE_CASE )
__lowercase : Tuple = self(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
__lowercase : List[str] = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__lowercase : Optional[int] = nn.CrossEntropyLoss(ignore_index=_SCREAMING_SNAKE_CASE )
assert lm_logits.shape[-1] == self.vocab_size
__lowercase : Optional[Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__lowercase : Union[str, Any] = nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=-1 )
__lowercase : Union[str, Any] = label_smoothed_nll_loss(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.hparams.label_smoothing , ignore_index=_SCREAMING_SNAKE_CASE )
return (loss,)
@property
def _lowerCamelCase ( self ) -> int:
return self.tokenizer.pad_token_id
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
__lowercase : List[Any] = self._step(_SCREAMING_SNAKE_CASE )
__lowercase : int = dict(zip(self.loss_names , _SCREAMING_SNAKE_CASE ) )
# tokens per batch
__lowercase : Dict = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
__lowercase : Optional[int] = batch['''input_ids'''].shape[0]
__lowercase : List[Any] = batch['''input_ids'''].eq(self.pad ).sum()
__lowercase : List[str] = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
return self._generative_step(_SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_="val" ) -> Dict:
self.step_count += 1
__lowercase : Any = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__lowercase : Union[str, Any] = losses['''loss''']
__lowercase : Union[str, Any] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
__lowercase : Optional[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__lowercase : torch.FloatTensor = torch.tensor(_SCREAMING_SNAKE_CASE ).type_as(_SCREAMING_SNAKE_CASE )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_SCREAMING_SNAKE_CASE )
__lowercase : Dict = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
__lowercase : Optional[Any] = self.step_count
self.metrics[prefix].append(_SCREAMING_SNAKE_CASE ) # callback writes this to self.metrics_save_path
__lowercase : Optional[int] = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"""{prefix}_loss""": loss,
F"""{prefix}_{self.val_metric}""": metric_tensor,
}
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
return calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> dict:
__lowercase : Any = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__lowercase : str = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_SCREAMING_SNAKE_CASE , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__lowercase : List[str] = (time.time() - ta) / batch['''input_ids'''].shape[0]
__lowercase : List[str] = self.ids_to_clean_text(_SCREAMING_SNAKE_CASE )
__lowercase : List[str] = self.ids_to_clean_text(batch['''labels'''] )
__lowercase : Any = self._step(_SCREAMING_SNAKE_CASE )
__lowercase : int = dict(zip(self.loss_names , _SCREAMING_SNAKE_CASE ) )
__lowercase : Dict = self.calc_generative_metrics(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowercase : str = np.mean(lmap(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
base_metrics.update(gen_time=_SCREAMING_SNAKE_CASE , gen_len=_SCREAMING_SNAKE_CASE , preds=_SCREAMING_SNAKE_CASE , target=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return base_metrics
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
return self._generative_step(_SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> str:
return self.validation_epoch_end(_SCREAMING_SNAKE_CASE , prefix='''test''' )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> SeqaSeqDataset:
__lowercase : Dict = self.n_obs[type_path]
__lowercase : Dict = self.target_lens[type_path]
__lowercase : int = self.dataset_class(
self.tokenizer , type_path=_SCREAMING_SNAKE_CASE , n_obs=_SCREAMING_SNAKE_CASE , max_target_length=_SCREAMING_SNAKE_CASE , **self.dataset_kwargs , )
return dataset
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False ) -> DataLoader:
__lowercase : str = self.get_dataset(_SCREAMING_SNAKE_CASE )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__lowercase : Any = dataset.make_sortish_sampler(_SCREAMING_SNAKE_CASE , distributed=self.hparams.gpus > 1 )
return DataLoader(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , shuffle=_SCREAMING_SNAKE_CASE , num_workers=self.num_workers , sampler=_SCREAMING_SNAKE_CASE , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__lowercase : Optional[int] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_SCREAMING_SNAKE_CASE , batch_sampler=_SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , shuffle=_SCREAMING_SNAKE_CASE , num_workers=self.num_workers , sampler=_SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self ) -> DataLoader:
__lowercase : Dict = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_SCREAMING_SNAKE_CASE )
return dataloader
def _lowerCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def _lowerCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
BaseTransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
add_generic_args(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--max_source_length''' , default=10_24 , type=_SCREAMING_SNAKE_CASE , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_SCREAMING_SNAKE_CASE , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=1_42 , type=_SCREAMING_SNAKE_CASE , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=1_42 , type=_SCREAMING_SNAKE_CASE , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--max_tokens_per_batch''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--logger_name''' , type=_SCREAMING_SNAKE_CASE , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=_SCREAMING_SNAKE_CASE , default=-1 , required=_SCREAMING_SNAKE_CASE , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=_SCREAMING_SNAKE_CASE , default=5_00 , required=_SCREAMING_SNAKE_CASE , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=_SCREAMING_SNAKE_CASE , default=-1 , required=_SCREAMING_SNAKE_CASE , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=_SCREAMING_SNAKE_CASE , default='''summarization''' , required=_SCREAMING_SNAKE_CASE , help='''# examples. -1 means use all.''' )
parser.add_argument('''--label_smoothing''' , type=_SCREAMING_SNAKE_CASE , default=0.0 , required=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--src_lang''' , type=_SCREAMING_SNAKE_CASE , default='''''' , required=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--tgt_lang''' , type=_SCREAMING_SNAKE_CASE , default='''''' , required=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--eval_beams''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--val_metric''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=_SCREAMING_SNAKE_CASE , default=1 , required=_SCREAMING_SNAKE_CASE , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=_SCREAMING_SNAKE_CASE , default=-1 , required=_SCREAMING_SNAKE_CASE , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class UpperCAmelCase_ ( _lowercase ):
UpperCamelCase ='''translation'''
UpperCamelCase =['''loss''']
UpperCamelCase =['''bleu''']
UpperCamelCase ='''bleu'''
def __init__( self , UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]:
super().__init__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowercase : Dict = hparams.src_lang
__lowercase : str = hparams.tgt_lang
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> dict:
return calculate_bleu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=None ):
Path(args.output_dir ).mkdir(exist_ok=_a )
check_output_dir(_a , expected_items=3 )
if model is None:
if "summarization" in args.task:
__lowercase : SummarizationModule = SummarizationModule(_a )
else:
__lowercase : SummarizationModule = TranslationModule(_a )
__lowercase : Optional[int] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
__lowercase : Any = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__lowercase : Optional[int] = os.environ.get('''WANDB_PROJECT''' , _a )
__lowercase : Union[str, Any] = WandbLogger(name=model.output_dir.name , project=_a )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__lowercase : List[Any] = WandbLogger(name=model.output_dir.name , project=f"""hf_{dataset}""" )
if args.early_stopping_patience >= 0:
__lowercase : Optional[Any] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
__lowercase : int = False
__lowercase : str = args.val_metric == '''loss'''
__lowercase : pl.Trainer = generic_train(
_a , _a , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _a ) , early_stopping_callback=_a , logger=_a , )
pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
__lowercase : Any = ''''''
__lowercase : Tuple = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=_a ) )
if checkpoints:
__lowercase : List[str] = checkpoints[-1]
__lowercase : Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
a_ = pl.Trainer.add_argparse_args(parser)
a_ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
a_ = parser.parse_args()
main(args)
| 249 |
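# A hedged sketch of a typical invocation of the fine-tuning script above;
# the script filename, checkpoint, data paths, and hyper-parameters are
# illustrative, not canonical:
#
#   python finetune.py \
#     --model_name_or_path sshleifer/distilbart-xsum-12-3 \
#     --data_dir ./xsum \
#     --output_dir ./distilbart-xsum-ft \
#     --gpus 1 --do_train --do_predict \
#     --max_source_length 1024 --max_target_length 56 --task summarization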
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCamelCase = logging.get_logger(__name__)
class _a ( _lowercase):
_a : Optional[Any] = ['''pixel_values''']
def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , _SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **_SCREAMING_SNAKE_CASE : int , )-> None:
super().__init__(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 256}
lowerCAmelCase__ : Tuple = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowerCAmelCase__ : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
lowerCAmelCase__ : List[str] = do_resize
lowerCAmelCase__ : Optional[Any] = size
lowerCAmelCase__ : Any = resample
lowerCAmelCase__ : str = do_center_crop
lowerCAmelCase__ : Dict = crop_size
lowerCAmelCase__ : str = do_rescale
lowerCAmelCase__ : List[str] = rescale_factor
lowerCAmelCase__ : int = do_normalize
lowerCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Dict[str, int] , _SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : Dict , )-> np.ndarray:
lowerCAmelCase__ : str = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCAmelCase__ : List[str] = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=_SCREAMING_SNAKE_CASE )
return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Dict[str, int] , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : List[str] , )-> np.ndarray:
lowerCAmelCase__ : Dict = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : Optional[int] )-> np.ndarray:
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Union[float, List[float]] , _SCREAMING_SNAKE_CASE : Union[float, List[float]] , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : str , )-> np.ndarray:
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : ImageInput , _SCREAMING_SNAKE_CASE : Optional[bool] = None , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : PILImageResampling = None , _SCREAMING_SNAKE_CASE : bool = None , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , _SCREAMING_SNAKE_CASE : Optional[float] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE : Tuple , )-> Optional[Any]:
lowerCAmelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ : List[str] = size if size is not None else self.size
lowerCAmelCase__ : Any = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = resample if resample is not None else self.resample
lowerCAmelCase__ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ : Any = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
lowerCAmelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ : str = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ : List[Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std
lowerCAmelCase__ : Optional[int] = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase__ : List[Any] = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
lowerCAmelCase__ : Dict = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
lowerCAmelCase__ : Dict = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowerCAmelCase__ : List[Any] = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
lowerCAmelCase__ : Tuple = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images]
lowerCAmelCase__ : Dict = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
lowerCAmelCase__ : Dict = {'''pixel_values''': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Tuple] = None )-> List[Any]:
lowerCAmelCase__ : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ : Tuple = target_sizes.numpy()
lowerCAmelCase__ : Tuple = []
for idx in range(len(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase__ : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase__ : Any = logits.argmax(dim=1 )
lowerCAmelCase__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 131 | 0 |
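# A minimal sketch of the resize -> center-crop -> rescale -> normalize
# pipeline the processor class above implements, run through a stock
# checkpoint (the class name in the sample is mangled, so a generic auto
# class stands in; the checkpoint and output shape are illustrative).
import numpy as np
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained('microsoft/resnet-50')
image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
inputs = processor(images=image, return_tensors='pt')
print(inputs['pixel_values'].shape)  # e.g. torch.Size([1, 3, 224, 224])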
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__A : Optional[int] = '\\n Text data.\n Second line of data.'
__A : List[str] = 'file'
@pytest.fixture(scope="""session""" )
def __UpperCamelCase ( _A : Any ) ->Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
lowerCamelCase_ =bytes(_A , """utf-8""" )
with zstd.open(_A , """wb""" ) as f:
f.write(_A )
return path
@pytest.fixture
def __UpperCamelCase ( _A : Tuple ) ->Any:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , _A ) , """w""" ) as f:
f.write(_A )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def __UpperCamelCase ( _A : List[Any] , _A : List[Any] , _A : str , _A : Dict , _A : Dict , _A : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ ={"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
lowerCamelCase_ =input_paths[compression_format]
lowerCamelCase_ =tmp_path / """cache"""
lowerCamelCase_ =DownloadConfig(cache_dir=_A , extract_compressed_file=_A )
lowerCamelCase_ =cached_path(_A , download_config=_A )
with open(_A ) as f:
lowerCamelCase_ =f.read()
with open(_A ) as f:
lowerCamelCase_ =f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def __UpperCamelCase ( _A : Any , _A : Tuple , _A : List[Any] , _A : Union[str, Any] , _A : str ) ->str:
"""simple docstring"""
lowerCamelCase_ ="""custom_cache"""
lowerCamelCase_ ="""custom_extracted_dir"""
lowerCamelCase_ =tmp_path / """custom_extracted_path"""
if default_extracted:
lowerCamelCase_ =("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , _A )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_A ) )
lowerCamelCase_ =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowerCamelCase_ =xz_file
lowerCamelCase_ =(
DownloadConfig(extract_compressed_file=_A )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_A )
)
lowerCamelCase_ =cached_path(_A , download_config=_A )
assert Path(_A ).parent.parts[-2:] == expected
def __UpperCamelCase ( _A : List[Any] ) ->int:
"""simple docstring"""
lowerCamelCase_ =str(Path(_A ).resolve() )
assert cached_path(_A ) == text_file
# relative path
lowerCamelCase_ =str(Path(_A ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_A ) == text_file
def __UpperCamelCase ( _A : List[str] ) ->Tuple:
"""simple docstring"""
lowerCamelCase_ =str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(_A ):
cached_path(_A )
# relative path
lowerCamelCase_ ="""./__missing_file__.txt"""
with pytest.raises(_A ):
cached_path(_A )
def __UpperCamelCase ( _A : Any ) ->Dict:
"""simple docstring"""
lowerCamelCase_ =get_from_cache(f'tmp://{tmpfs_file}' )
with open(_A ) as f:
lowerCamelCase_ =f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A )
def __UpperCamelCase ( ) ->List[str]:
"""simple docstring"""
with pytest.raises(_A ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A )
def __UpperCamelCase ( _A : Tuple ) ->Tuple:
"""simple docstring"""
lowerCamelCase_ =tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_A ):
http_get("""https://huggingface.co""" , temp_file=_A )
with pytest.raises(_A ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A )
def __UpperCamelCase ( _A : List[Any] ) ->List[str]:
"""simple docstring"""
lowerCamelCase_ =tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_A ):
ftp_get("""ftp://huggingface.co""" , temp_file=_A )
with pytest.raises(_A ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A )
def __UpperCamelCase ( _A : Union[str, Any] ) ->Dict:
"""simple docstring"""
lowerCamelCase_ =tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_A ):
fsspec_get("""s3://huggingface.co""" , temp_file=_A )
with pytest.raises(_A ):
fsspec_head("""s3://huggingface.co""" )
| 368 |
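# A short sketch of the happy path the fixtures above exercise: fetch a file
# through cached_path with a DownloadConfig and read the (possibly extracted)
# result. The URL is illustrative.
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

download_config = DownloadConfig(cache_dir='./hf_cache', extract_compressed_file=True)
local_path = cached_path('https://example.com/data.txt.gz', download_config=download_config)
with open(local_path) as f:
    print(f.read()[:80])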
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("""encoder"""):
        k = k.replace(""".attn""", """.self_attn""")
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """final_layer_norm""")
    elif k.startswith("""decoder"""):
        k = k.replace("""norm1""", """self_attn_layer_norm""")
        k = k.replace("""norm2""", """encoder_attn_layer_norm""")
        k = k.replace("""norm3""", """final_layer_norm""")
    return k
def rename_layernorm_keys(sd):
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("""layernorm_embedding""", """layer_norm""")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the transformers format."""
    model = torch.load(checkpoint_path, map_location="""cpu""")
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 49 | 0 |
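# A hedged example invocation of the conversion script above (the script and
# file names are illustrative; the checkpoint must be a ParlAI Blenderbot
# state dict):
#
#   python convert_blenderbot_checkpoint.py \
#     --src_path blenderbot-model.bin \
#     --save_dir ./hf_blenderbot \
#     --hf_config_json blenderbot-3b-config.json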
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCAmelCase : str =[0, 0]
# all coordinates are given in format [y,x]
UpperCAmelCase : int =[len(grid) - 1, len(grid[0]) - 1]
UpperCAmelCase : List[str] =1
# the cost map which pushes the path closer to the goal
UpperCAmelCase : str =[[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCAmelCase : Dict =abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCAmelCase : List[str] =99
UpperCAmelCase : str =search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
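# The open list above is ordered by the standard A* evaluation function
#
#     f(n) = g(n) + h(n)
#
# where g is the accumulated step cost and h the Manhattan-distance heuristic.
# With a uniform step cost of 1 on a 4-connected grid, Manhattan distance never
# overestimates the remaining cost, so the returned path is optimal; the extra
# penalty of 99 only marks obstacle cells, which are never expanded anyway.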
| 128 |
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
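# Non-interactive sanity check (assumed usage; the sort works in place and also
# returns the list):
#
#     assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
#     assert binary_insertion_sort([]) == []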
| 177 |
import base64  # the obfuscated "baseaa" import mangled digits; the stdlib module is base64 either way


def base32_encode(string: str) -> bytes:
    # base32 assumed: the mangled "baaencode" could equally have been b16/b64
    return base64.b32encode(string.encode('utf-8'))


def base32_decode(encoded_bytes: bytes) -> str:
    return base64.b32decode(encoded_bytes).decode('utf-8')


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base32_encode(test)
    print(encoded)

    decoded = base32_decode(encoded)
    print(decoded)
| 177 | 1 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=14 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=4 , a_=4 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=0.02 , ):
'''simple docstring'''
__snake_case : Any = parent
__snake_case : List[str] = batch_size
__snake_case : Union[str, Any] = seq_length
__snake_case : Tuple = is_training
__snake_case : List[Any] = use_input_mask
__snake_case : List[Any] = use_token_type_ids
__snake_case : Union[str, Any] = use_labels
__snake_case : Union[str, Any] = vocab_size
__snake_case : Dict = hidden_size
__snake_case : int = rotary_dim
__snake_case : Any = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Dict = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : List[str] = hidden_dropout_prob
__snake_case : Any = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : Tuple = initializer_range
__snake_case : int = None
__snake_case : Optional[Any] = vocab_size - 1
__snake_case : List[str] = vocab_size - 1
__snake_case : Tuple = vocab_size - 1
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : int = None
if self.use_input_mask:
__snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : int = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=a_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : int = config_and_inputs
__snake_case : Tuple = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = 20
__snake_case : int = model_class_name(a_ )
__snake_case : Tuple = model.init_cache(input_ids.shape[0] , a_ )
__snake_case : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__snake_case : List[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__snake_case : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=a_ , past_key_values=a_ , position_ids=a_ , )
__snake_case : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
__snake_case : List[str] = model(
input_ids[:, -1:] , attention_mask=a_ , past_key_values=outputs_cache.past_key_values , position_ids=a_ , )
__snake_case : Union[str, Any] = model(a_ )
__snake_case : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = 20
__snake_case : str = model_class_name(a_ )
__snake_case : Tuple = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__snake_case : Union[str, Any] = model.init_cache(input_ids.shape[0] , a_ )
__snake_case : Any = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__snake_case : List[str] = model(
input_ids[:, :-1] , attention_mask=a_ , past_key_values=a_ , position_ids=a_ , )
__snake_case : Union[str, Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
__snake_case : Optional[int] = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=a_ , position_ids=a_ , )
__snake_case : Optional[int] = model(a_ , attention_mask=a_ )
__snake_case : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowerCamelCase__ =(FlaxGPTJForCausalLM,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = FlaxGPTJModelTester(self )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case , __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(a_ , a_ , a_ , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case , __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
a_ , a_ , a_ , a_ )
@tooslow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
__snake_case : str = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=a_ , truncation=a_ )
__snake_case : Optional[int] = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
__snake_case : Any = False
__snake_case : List[str] = model.config.eos_token_id
__snake_case : Dict = jax.jit(model.generate )
__snake_case : Dict = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
__snake_case : Union[str, Any] = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
__snake_case : int = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(a_ , a_ )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__snake_case : Tuple = self._prepare_for_class(a_ , a_ )
__snake_case : str = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__snake_case : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
__snake_case : List[Any] = getattr(a_ , a_ )
__snake_case , __snake_case : Dict = pt_inputs['''input_ids'''].shape
__snake_case : Optional[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(a_ ):
__snake_case : List[str] = 0
__snake_case : Tuple = 1
__snake_case : int = 0
__snake_case : Optional[Any] = 1
__snake_case : Any = pt_model_class(a_ ).eval()
__snake_case : int = model_class(a_ , dtype=jnp.floataa )
__snake_case : Dict = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , a_ )
__snake_case : str = fx_state
with torch.no_grad():
__snake_case : int = pt_model(**a_ ).to_tuple()
__snake_case : str = fx_model(**a_ ).to_tuple()
self.assertEqual(len(a_ ) , len(a_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(a_ , a_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(a_ )
__snake_case : List[str] = model_class.from_pretrained(a_ , from_pt=a_ )
__snake_case : int = fx_model_loaded(**a_ ).to_tuple()
self.assertEqual(
len(a_ ) , len(a_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(a_ , a_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__snake_case : List[Any] = self._prepare_for_class(a_ , a_ )
__snake_case : Dict = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__snake_case : str = model_class.__name__[4:] # Skip the "Flax" at the beginning
__snake_case : List[Any] = getattr(a_ , a_ )
__snake_case : List[str] = pt_model_class(a_ ).eval()
__snake_case : int = model_class(a_ , dtype=jnp.floataa )
__snake_case : Dict = load_flax_weights_in_pytorch_model(a_ , fx_model.params )
__snake_case , __snake_case : Union[str, Any] = pt_inputs['''input_ids'''].shape
__snake_case : Optional[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(a_ ):
__snake_case : Any = 0
__snake_case : int = 1
__snake_case : int = 0
__snake_case : int = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__snake_case : List[Any] = pt_model(**a_ ).to_tuple()
__snake_case : List[str] = fx_model(**a_ ).to_tuple()
self.assertEqual(len(a_ ) , len(a_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(a_ , a_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(a_ )
__snake_case : Any = pt_model_class.from_pretrained(a_ , from_flax=a_ )
with torch.no_grad():
__snake_case : Union[str, Any] = pt_model_loaded(**a_ ).to_tuple()
self.assertEqual(
len(a_ ) , len(a_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(a_ , a_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case : List[str] = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
__snake_case : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(a_ )
| 102 |
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply a function to iterable elements in parallel, where the implementation uses either
    multiprocessing.Pool or joblib for parallelization."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}")

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}")
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # joblib is only imported when this backend is actually requested
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable)


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Context manager that routes parallel work through a joblib backend instead of multiprocessing."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
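# Hedged usage sketch: `parallel_backend` is meant to wrap `datasets` calls that
# accept `num_proc`, routing their work through joblib. The spark example below
# assumes `joblibspark` is installed and a Spark session exists:
#
#     from datasets import load_dataset
#     from datasets.parallel import parallel_backend
#
#     with parallel_backend("spark"):
#         ds = load_dataset("oscar", "unshuffled_deduplicated_en", num_proc=2)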
| 324 | 0 |
"""simple docstring"""
import math
SCREAMING_SNAKE_CASE : List[str] = 10
SCREAMING_SNAKE_CASE : Optional[int] = 7
SCREAMING_SNAKE_CASE : int = BALLS_PER_COLOUR * NUM_COLOURS
def lowercase ( _snake_case : int = 20 ) ->str:
"""simple docstring"""
__snake_case : Any = math.comb(_snake_case , _snake_case )
__snake_case : Dict = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _snake_case )
__snake_case : List[Any] = NUM_COLOURS * (1 - missing_colour / total)
return f"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
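# The closed form evaluated above follows from linearity of expectation over the
# seven per-colour indicators "at least one ball of this colour drawn":
#
#     E[colours] = 7 * (1 - C(60, 20) / C(70, 20)) ~= 6.818741802
#
# since C(60, 20) / C(70, 20) is the probability that a given colour is missed
# entirely when 20 of the 70 balls are drawn.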
| 357 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( _snake_case : str , _snake_case : str , _snake_case : str ) ->List[Any]:
"""simple docstring"""
def get_masked_lm_array(_snake_case : str ):
__snake_case : int = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : str = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Any = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_array(_snake_case : str ):
__snake_case : List[str] = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Union[str, Any] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_layer_array(_snake_case : int , _snake_case : str ):
__snake_case : str = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Optional[int] = tf.train.load_variable(_snake_case , _snake_case )
if "kernel" in name:
__snake_case : Optional[Any] = array.transpose()
return torch.from_numpy(_snake_case )
def get_encoder_attention_layer_array(_snake_case : int , _snake_case : str , _snake_case : str ):
__snake_case : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Dict = tf.train.load_variable(_snake_case , _snake_case )
__snake_case : int = array.reshape(_snake_case )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(_snake_case )
print(f"""Loading model based on config from {config_path}...""" )
__snake_case : Optional[Any] = BertConfig.from_json_file(_snake_case )
__snake_case : Dict = BertForMaskedLM(_snake_case )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__snake_case : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
__snake_case : BertSelfAttention = layer.attention.self
__snake_case : int = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
__snake_case : List[Any] = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
__snake_case : Union[str, Any] = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
__snake_case : BertSelfOutput = layer.attention.output
__snake_case : Dict = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
__snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' )
__snake_case : Any = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' )
# Intermediate
__snake_case : BertIntermediate = layer.intermediate
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' )
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' )
# Output
__snake_case : BertOutput = layer.output
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' )
__snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' )
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' )
__snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' )
# Embeddings
__snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' )
__snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' )
__snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' )
__snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
__snake_case : Optional[Any] = model.cls.predictions.transform
__snake_case : Dict = get_masked_lm_array('''dense/kernel''' )
__snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' )
__snake_case : str = get_masked_lm_array('''layer_norm/gamma''' )
__snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' )
__snake_case : Tuple = get_masked_lm_array('''embedding_table''' )
# Pooling
__snake_case : Optional[Any] = BertPooler(config=_snake_case )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(_snake_case )
# Integration test - should load without any errors ;)
__snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case )
print(new_model.eval() )
print('''Model conversion was done sucessfully!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 24 | 0 |
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_0_0_0)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 34 |
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ):
        super().__init__()

        if hasattr(scheduler.config, 'steps_offset') and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
                f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                ' to update the config accordingly as leaving `steps_offset` might led to incorrect results'
                ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
                ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
                ' file'
            )
            deprecate('steps_offset!=1', '1.0.0', deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config['steps_offset'] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, 'skip_prk_steps') and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
                ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
                ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
                ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
                ' Hub, it would be very nice if you could open a Pull request for the'
                ' `scheduler/scheduler_config.json` file'
            )
            deprecate('skip_prk_steps not set', '1.0.0', deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config['skip_prk_steps'] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')

        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device('cuda')

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device('meta') or not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__( self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        # We use the input text to generate the mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding='max_length', return_tensors='pt').to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
| 125 | 0 |
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is equal to its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of the Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), F'{a} is not hermitian.'
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), F'{a} is not hermitian.'
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
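# The quantity computed above is the Rayleigh quotient
#
#     R(M, v) = (v* M v) / (v* v)
#
# For a Hermitian matrix M it is always real and bounded by the extreme
# eigenvalues, lambda_min <= R(M, v) <= lambda_max, with equality attained at
# the corresponding eigenvectors.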
| 304 |
import argparse
import re

import requests
import torch

# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from transformers import (
    BertTokenizer,
    BlipConfig,
    BlipForConditionalGeneration,
    BlipForImageTextRetrieval,
    BlipForQuestionAnswering,
)


def load_demo_image(image_size, device):
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ])
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*', 'vision_model.encoder', key)
    if "blocks" in key:
        key = re.sub(r'blocks', 'layers', key)
    if "attn" in key:
        key = re.sub(r'attn', 'self_attn', key)
    if "norm1" in key:
        key = re.sub(r'norm1', 'layer_norm1', key)
    if "norm2" in key:
        key = re.sub(r'norm2', 'layer_norm2', key)
    if "encoder.norm" in key:
        key = re.sub(r'encoder.norm', 'post_layernorm', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'encoder.patch_embed.proj', 'embeddings.patch_embedding', key)
    if "encoder.pos_embed" in key:
        key = re.sub(r'encoder.pos_embed', 'embeddings.position_embedding', key)
    if "encoder.cls_token" in key:
        key = re.sub(r'encoder.cls_token', 'embeddings.class_embedding', key)
    if "self_attn" in key:
        key = re.sub(r'self_attn.proj', 'self_attn.projection', key)
    return key


@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='base')
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device='cpu')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    input_ids = tokenizer(['a picture of']).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='base')
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ['How many dogs are in this image?']
    question_input_ids = tokenizer(question, return_tensors='pt').input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa')

    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='base')
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ['A picture of a woman with a dog sitting in a beach']
    question_input_ids = tokenizer(
        question, return_tensors='pt', padding='max_length', truncation=True, max_length=35, ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.21106874942779541
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 304 | 1 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def a__ ( snake_case__ ) -> str:
if not isinstance(snake_case__ , snake_case__ ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
lowerCamelCase = precision
lowerCamelCase = ceil(precision / 14 )
lowerCamelCase = 42_68_80 * Decimal(1_00_05 ).sqrt()
lowerCamelCase = 1
lowerCamelCase = 13_59_14_09
lowerCamelCase = Decimal(snake_case__ )
for k in range(1 , snake_case__ ):
lowerCamelCase = factorial(6 * k ) // (factorial(3 * k ) * factorial(snake_case__ ) ** 3)
linear_term += 5_45_14_01_34
exponential_term *= -26_25_37_41_26_40_76_80_00
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
lowerCAmelCase : str = 50
print(F"""The first {n} digits of pi is: {pi(n)}""")
| 291 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["audio_values", "audio_mask"]
def __init__( self , _a=2_048 , _a=1 , _a=[16, 16] , _a=128 , _a=44_100 , _a=86 , _a=2_048 , _a=0.0 , **_a , ):
"""simple docstring"""
super().__init__(
feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , )
lowerCamelCase = spectrogram_length
lowerCamelCase = num_channels
lowerCamelCase = patch_size
lowerCamelCase = feature_size // self.patch_size[1]
lowerCamelCase = n_fft
lowerCamelCase = sampling_rate // hop_length_to_sampling_rate
lowerCamelCase = sampling_rate
lowerCamelCase = padding_value
lowerCamelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = spectrogram(
_a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
lowerCamelCase = log_spec[:, :-1]
lowerCamelCase = log_spec - 20.0
lowerCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCamelCase = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowerCamelCase = is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
lowerCamelCase = np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCamelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , _a ):
lowerCamelCase = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCamelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCamelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCamelCase = np.array(_a ).astype(np.floataa )
# convert into correct format for padding
lowerCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCamelCase = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCamelCase = padded_audio_features * self.padding_value
for i in range(len(_a ) ):
lowerCamelCase = audio_features[i]
lowerCamelCase = feature
# return as BatchFeature
if return_attention_mask:
lowerCamelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
lowerCamelCase = {"""audio_values""": padded_audio_features}
lowerCamelCase = BatchFeature(data=_a , tensor_type=_a )
return encoded_inputs
| 291 | 1 |
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = "//div[@class = \"maincounter-number\"]/span/text()"
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 361 |
class Graph:
    """Graph stored as adjacency lists, with Boruvka's MST algorithm attached."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct by bumping duplicates upwards."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Returns the minimum spanning tree of `graph` as a new Graph."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
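# Minimal usage sketch of the API above (vertex labels are arbitrary hashables;
# call distinct_weight() first if input weights may repeat):
#
#     g = Graph.build(
#         vertices=[0, 1, 2, 3],
#         edges=[(0, 1, 1), (0, 2, 2), (1, 2, 3), (2, 3, 4)],
#     )
#     mst = Graph.boruvka_mst(g)
#     print(mst)  # the three MST edges: 0-1 (1), 0-2 (2), 2-3 (4)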
| 300 | 0 |
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds
        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()

        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(f'cuda:{gpu_id}')

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')

        device = torch.device(f'cuda:{gpu_id}')

        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator=None, latents=None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 62 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters ( model ) -> int:
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
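# Minimal usage sketch (the model below is illustrative, not from this file):
#   linear = torch.nn.Linear(10, 10)
#   count_trainable_parameters(linear)  # -> 110 (100 weights + 10 biases)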
logger = logging.getLogger(__name__)


def get_checkpoint_callback ( output_dir , metric ) -> ModelCheckpoint:
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            F'seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to'
            " this function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback ( metric , patience ) -> EarlyStopping:
    return EarlyStopping(
        monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
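# Usage sketch (output directory and patience values are hypothetical):
#   ckpt = get_checkpoint_callback("outputs/", "rouge2")
#   es = get_early_stopping_callback("rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[ckpt, es])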
class __SCREAMING_SNAKE_CASE (pl.Callback ):
    """simple docstring"""

    def on_batch_end( self , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        lrs = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )

    @rank_zero_only
    def _write_logs( self , trainer : pl.Trainer , pl_module : pl.LightningModule , type_path : str , save_generations : bool = True ):
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
            results_file.parent.mkdir(exist_ok=True )
            generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )

    @rank_zero_only
    def on_train_start( self , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )

    @rank_zero_only
    def on_test_end( self , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )

    @rank_zero_only
    def on_validation_end( self , trainer : pl.Trainer , pl_module : pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 63 | 0 |
from __future__ import annotations
def rec_insertion_sort ( collection : list , n : int ):
    '''simple docstring'''
    # Checks if the entire collection has been sorted
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )


def insert_next ( collection : list , index : int ):
    '''simple docstring'''
    # Checks order between adjacent elements
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
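# Worked example: rec_insertion_sort([3, 1, 2], 3) mutates the list in place,
# stepping [3, 1, 2] -> [1, 3, 2] -> [1, 2, 3]; insert_next bubbles each
# out-of-order element leftward before the recursion shrinks the prefix.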
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 370 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate (*args , take_from : Optional[Union[Dict, Any]] = None , standard_warn : bool = True , stacklevel : int = 2 ):
    '''simple docstring'''
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                F''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
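# Usage sketch (the kwarg name and version below are hypothetical): pop a
# deprecated keyword argument with a FutureWarning and fall back to its value.
#   num = deprecate("num_samples", "1.0.0", "Use `batch_size` instead.", take_from=kwargs)
# Returns the popped value when the caller still passed the old kwarg.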
| 20 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset ):
    def __init__( self , dataset , process , params ) -> None:
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__( self ) -> int:
        return len(self.dataset )

    def __getitem__( self , i ):
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class PipelineIterator(IterableDataset ):
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__( self ):
        return len(self.loader )

    def __iter__( self ):
        self.iterator = iter(self.loader )
        return self
    def loader_batch_item( self ):
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result
    def __next__( self ):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator ):
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        super().__init__(loader , infer , params )

    def __iter__( self ):
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self

    def __next__( self ):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item;
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
class PipelinePackIterator(PipelineIterator ):
    def __iter__( self ):
        self.iterator = iter(self.loader )
        return self

    def __next__( self ):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last" )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last" )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last" )
                accumulator.append(item )
        return accumulator
class KeyDataset(Dataset ):
    def __init__( self , dataset , key ):
        self.dataset = dataset
        self.key = key

    def __len__( self ):
        return len(self.dataset )

    def __getitem__( self , i ):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset ):
    def __init__( self , dataset , key1 , key2 ):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__( self ):
        return len(self.dataset )

    def __getitem__( self , i ):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 19 |
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable , y0: float , x0: float , step_size: float , x_end: float):
    # Number of integration steps needed to cover [x0, x_end]
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k])
        x += step_size
    return y
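# Worked example: dy/dx = y with y(0) = 1 on [0, 1], step 0.01, approximates e:
#   ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   ys[-1]  # ~2.7048 vs. the exact e ~ 2.7183; explicit Euler is only O(h) accurate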
if __name__ == "__main__":
import doctest
doctest.testmod() | 76 | 0 |
'''simple docstring'''
def is_arithmetic_series( series : list ):
    if not isinstance(series , list ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(series ) == 0:
        raise ValueError("Input list must be a non empty list" )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean( series : list ):
    if not isinstance(series , list ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(series ) == 0:
        raise ValueError("Input list must be a non empty list" )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
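# Examples:
#   is_arithmetic_series([2, 4, 6])  # True, common difference 2
#   is_arithmetic_series([2, 4, 7])  # False
#   arithmetic_mean([2, 4, 6])       # 4.0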
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Any =logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=3_84 , num_labels=10_00 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"Pushing model and processor to the hub {vit_name}" )
        model.push_to_hub(F"ybelkada/{vit_name}" )
        processor.push_to_hub(F"ybelkada/{vit_name}" )
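# Example invocation (the script filename and output path are assumed):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base --push_to_hub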
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 123 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module( module ) -> bool:
    '''simple docstring'''
    if is_torch_version('<' , '2.0.0' ) or not hasattr(torch , '_dynamo' ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel( model , keep_fp32_wrapper = True ):
    '''simple docstring'''
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model , 'forward' )
        original_forward = model.__dict__.pop('_original_forward' , None )
        if original_forward is not None:
            while hasattr(forward , '__wrapped__' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model , '_converted_to_transformer_engine' , False ):
        convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone() -> None:
    '''simple docstring'''
    PartialState().wait_for_everyone()


def save( obj , f ):
    '''simple docstring'''
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment( **kwargs ):
    '''simple docstring'''
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name( obj ) -> str:
    '''simple docstring'''
    if not hasattr(obj , '__qualname__' ) and not hasattr(obj , '__name__' ):
        obj = getattr(obj , '__class__' , obj )
    if hasattr(obj , '__qualname__' ):
        return obj.__qualname__
    if hasattr(obj , '__name__' ):
        return obj.__name__
    return str(obj )
def merge_dicts( source , destination ):
    '''simple docstring'''
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use( port = None ) -> bool:
    '''simple docstring'''
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('localhost', port) ) == 0
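# Usage sketches (the values below are hypothetical):
#   with patch_environment(CUDA_VISIBLE_DEVICES="0"):
#       ...  # the env var exists only inside this block
#   if is_port_in_use(29_500):
#       raise RuntimeError("Default distributed port taken; pick another main process port.")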
| 222 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline( self , model , tokenizer , processor ):
        """simple docstring"""
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
        video_classifier = VideoClassificationPipeline(model=model , image_processor=processor , top_k=2 )
        examples = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples

    def run_pipeline_test( self , video_classifier , examples ):
        """simple docstring"""
        for example in examples:
            outputs = video_classifier(example )
            self.assertEqual(
                outputs , [
                    {'score': ANY(float ), 'label': ANY(str )},
                    {'score': ANY(float ), 'label': ANY(str )},
                ] , )

    @require_torch
    def test_small_model_pt( self ):
        """simple docstring"""
        small_model = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
        video_classifier = pipeline(
            'video-classification' , model=small_model , feature_extractor=small_feature_extractor , frame_sampling_rate=4 )
        video_file_path = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
        outputs = video_classifier(video_file_path , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}] , )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
            ] , )

    @require_tf
    def test_small_model_tf( self ):
        """simple docstring"""
        pass
| 222 | 1 |
"""simple docstring"""
def selection_sort( collection ) -> list:
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
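# Example: selection_sort([64, 25, 12, 22, 11]) returns [11, 12, 22, 25, 64];
# each pass selects the minimum of the unsorted suffix and swaps it into place.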
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(selection_sort(unsorted))
| 233 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    '''simple docstring'''

    model_type = """mobilenet_v1"""

    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("""1.11""")
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def lowerCAmelCase ( self : int ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def lowerCAmelCase ( self : int ) ->float:
"""simple docstring"""
return 1E-4
| 233 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester( unittest.TestCase):
    def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True, ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict( self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values( self, image_inputs, batched=False):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp( self):
        '''simple docstring'''
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict( self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__a, "image_mean"))
self.assertTrue(hasattr(__a, "image_std"))
self.assertTrue(hasattr(__a, "do_normalize"))
self.assertTrue(hasattr(__a, "do_rescale"))
self.assertTrue(hasattr(__a, "rescale_factor"))
self.assertTrue(hasattr(__a, "do_resize"))
self.assertTrue(hasattr(__a, "size"))
self.assertTrue(hasattr(__a, "do_pad"))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
self.assertEqual(image_processor.do_pad, __a)
_lowerCAmelCase : int = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=__a)
self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
self.assertEqual(image_processor.do_pad, __a)
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowerCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=__a)
for image in image_inputs:
self.assertIsInstance(__a, Image.Image)
# Test not batched input
_lowerCAmelCase : Tuple = image_processing(image_inputs[0], return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(__a)
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_lowerCAmelCase , _lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(__a, batched=__a)
_lowerCAmelCase : Optional[Any] = image_processing(__a, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=__a, numpify=__a)
for image in image_inputs:
self.assertIsInstance(__a, np.ndarray)
# Test not batched input
_lowerCAmelCase : List[str] = image_processing(image_inputs[0], return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(__a)
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_lowerCAmelCase : List[Any] = image_processing(__a, return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : int = self.image_processor_tester.get_expected_values(__a, batched=__a)
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=__a, torchify=__a)
for image in image_inputs:
self.assertIsInstance(__a, torch.Tensor)
# Test not batched input
_lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0], return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(__a)
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_lowerCAmelCase : Any = image_processing(__a, return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__a, batched=__a)
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
_lowerCAmelCase : int = json.loads(f.read())
_lowerCAmelCase : Optional[int] = {"image_id": 3_9769, "annotations": target}
# encode them
_lowerCAmelCase : str = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
_lowerCAmelCase : str = image_processing(images=__a, annotations=__a, return_tensors="pt")
# verify pixel values
_lowerCAmelCase : Optional[Any] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape, __a)
_lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], __a, atol=1E-4))
# verify area
_lowerCAmelCase : int = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"], __a))
# verify boxes
_lowerCAmelCase : Union[str, Any] = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, __a)
_lowerCAmelCase : int = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], __a, atol=1E-3))
# verify image_id
_lowerCAmelCase : List[str] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], __a))
# verify is_crowd
_lowerCAmelCase : int = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], __a))
# verify class_labels
_lowerCAmelCase : List[str] = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], __a))
# verify orig_size
_lowerCAmelCase : Optional[int] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], __a))
# verify size
_lowerCAmelCase : Any = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"], __a))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
_lowerCAmelCase : Union[str, Any] = json.loads(f.read())
_lowerCAmelCase : Tuple = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
_lowerCAmelCase : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
# encode them
_lowerCAmelCase : Optional[Any] = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
_lowerCAmelCase : str = image_processing(images=__a, annotations=__a, masks_path=__a, return_tensors="pt")
# verify pixel values
_lowerCAmelCase : Optional[Any] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape, __a)
_lowerCAmelCase : str = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], __a, atol=1E-4))
# verify area
_lowerCAmelCase : List[Any] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"], __a))
# verify boxes
_lowerCAmelCase : Union[str, Any] = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, __a)
_lowerCAmelCase : List[str] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], __a, atol=1E-3))
# verify image_id
_lowerCAmelCase : List[str] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], __a))
# verify is_crowd
_lowerCAmelCase : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], __a))
# verify class_labels
_lowerCAmelCase : int = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], __a))
# verify masks
_lowerCAmelCase : Dict = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item(), __a)
# verify orig_size
_lowerCAmelCase : Tuple = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], __a))
# verify size
_lowerCAmelCase : Tuple = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"], __a))
| 36 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 5 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
    'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ctrl'] = [
        'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CTRLForSequenceClassification',
        'CTRLLMHeadModel',
        'CTRLModel',
        'CTRLPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_ctrl'] = [
        'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFCTRLForSequenceClassification',
        'TFCTRLLMHeadModel',
        'TFCTRLModel',
        'TFCTRLPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 354 | """simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput ):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig ):
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel ):
    _keys_to_ignore_on_load_unexpected = [R"pooler", R"logit_scale"]
    _keys_to_ignore_on_load_missing = [R"position_ids", R"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__( self , config ):
        super().__init__(config )
        self.base_model = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , """has_pre_transformation""" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()

    def forward( self , input_ids = None , attention_mask = None , token_type_ids = None , position_ids = None , head_mask = None , inputs_embeds = None , encoder_hidden_states = None , encoder_attention_mask = None , output_attentions = None , output_hidden_states = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output = outputs["""hidden_states"""][-2]
            sequence_output = self.pre_LN(sequence_output )
            projection_state = self.transformation_pre(sequence_output )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 126 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
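# The helper above round-trips the scheduler through save/load halfway through
# stepping, so the tests below can assert that a resumed schedule produces the
# same learning-rate trajectory as an uninterrupted one.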
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple):
"""simple docstring"""
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertAlmostEqual(lowerCAmelCase_ , lowerCAmelCase_ , delta=lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase_)
lowercase_ = torch.tensor([0.4, 0.2, -0.5])
lowercase_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowercase_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0)
for _ in range(1_0_0):
lowercase_ = criterion(lowerCAmelCase_ , lowerCAmelCase_)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase_)
lowercase_ = torch.tensor([0.4, 0.2, -0.5])
lowercase_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowercase_ = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCAmelCase_ , weight_decay=0.0 , relative_step=lowerCAmelCase_ , scale_parameter=lowerCAmelCase_ , warmup_init=lowerCAmelCase_ , )
for _ in range(1_0_0_0):
lowercase_ = criterion(lowerCAmelCase_ , lowerCAmelCase_)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2)
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
lowercase__ = nn.Linear(50 , 50 ) if is_torch_available() else None
lowercase__ = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowercase__ = 10
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str=None):
"""simple docstring"""
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertAlmostEqual(lowerCAmelCase_ , lowerCAmelCase_ , delta=lowerCAmelCase_ , msg=lowerCAmelCase_)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = {"""num_warmup_steps""": 2, """num_training_steps""": 1_0}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
lowercase_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler", )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        """simple docstring"""
        self.fn = fn

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        """simple docstring"""
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
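    # Rationale: LambdaLR schedules built from plain lambdas cannot be pickled,
    # which breaks torch.save/torch.load round-trips; wrapping each lr_lambda in
    # this top-level, picklable callable lets the save-and-reload test above
    # exercise pickling of the schedule.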
| 136 |
"""simple docstring"""
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
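# Minimal usage sketch (the checkpoint name is an assumption, not part of this
# module):
#   model = TFMT5Model.from_pretrained("google/mt5-small")
#   outputs = model(input_ids, decoder_input_ids=decoder_input_ids)
# The classes add nothing beyond the T5 implementations; they only bind the
# "mt5" model type and MT5Config.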
| 136 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    """simple docstring"""
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        '''simple docstring'''
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ) -> dict:
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
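# Note on the decoder mask built above: the first decoder position is always left
# attendable (tf.ones on column 0) so the model can attend to the forced decoder
# start token even when that slot happens to hold the pad token id.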
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    """simple docstring"""
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        '''simple docstring'''
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        '''simple docstring'''
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 355 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_deterministic() -> None:
    """simple docstring"""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
assert (
pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
)
def test_newline_cnn_improvement() -> None:
    """simple docstring"""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
assert score > score_no_sep
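# rougeLsum is the only ROUGE key that scores summaries sentence-by-sentence, so
# feeding it newline-separated sentences (newline_sep=True) lifts the score on
# these multi-sentence CNN/DailyMail-style summaries; the other keys are
# unaffected, which the next test checks.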
def test_newline_irrelevant_for_other_metrics() -> None:
    """simple docstring"""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep() -> None:
"""simple docstring"""
    pred = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
    tgt = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline() -> None:
"""simple docstring"""
    pred = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
    tgt = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
assert new_score > prev_score
def test_rouge_cli() -> None:
    """simple docstring"""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False)
    assert isinstance(metrics_default_dict, defaultdict)
| 150 | 0 |
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
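# Worked example: 25 == 0b11001 has three set bits, so both functions return 3.
# Brian Kernighan's version relies on `n & (n - 1)` clearing exactly the lowest
# set bit, so its loop runs once per set bit rather than once per bit position.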
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup, )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 25 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
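# Layout note for the permutes in replace_params: TF stores regular conv kernels
# as (height, width, in_channels, out_channels) and depthwise kernels as
# (height, width, channels, depth_multiplier), while PyTorch expects
# (out, in, H, W) and (channels, 1, H, W) respectively -- hence
# permute(3, 2, 0, 1) and permute(2, 3, 0, 1).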
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        repo_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(repo_name)
        hf_model.push_to_hub(repo_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 49 | 0 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    '''simple docstring'''
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config_from_model = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config_from_model, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    '''simple docstring'''
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 350 |
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
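# This is the space-optimized (1D) form of Pascal's triangle: row i is computed in
# place from row i - 1, and iterating j downward keeps c[j - 1] from being
# overwritten before it is read. Example: binomial_coefficient(n=10, r=5) == 252.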
print(binomial_coefficient(n=10, r=5))
| 325 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    """simple docstring"""
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize = True, size = None, resample = PILImageResampling.BICUBIC, do_center_crop = True, crop_size = None, do_rescale = True, rescale_factor = 1 / 255, do_normalize = True, image_mean = IMAGENET_DEFAULT_MEAN, image_std = IMAGENET_DEFAULT_STD, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample = PILImageResampling.BICUBIC, data_format = None, **kwargs, ):
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)
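    # The (256 / 224) factor reproduces the standard ImageNet evaluation recipe:
    # scale the shortest edge to 256-for-a-224-crop (proportionally for other
    # crop sizes) and let center_crop cut the final patch, instead of resizing
    # straight to the crop size.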
    def center_crop(self, image, size, data_format = None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format = None, **kwargs, ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format = None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize = None, size = None, resample = None, do_center_crop = None, crop_size = None, do_rescale = None, rescale_factor = None, do_normalize = None, image_mean = None, image_std = None, return_tensors = None, data_format = ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 177 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(__UpperCAmelCase , __UpperCAmelCase ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
        den = 10
return solutions
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
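# Project Euler 33: the four non-trivial digit-cancelling fractions are 16/64,
# 19/95, 26/65 and 49/98; their product is 1/100, so solution() returns the
# denominator 100.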
if __name__ == "__main__":
print(solution())
| 177 | 1 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(ratio: float = 0.1) -> int:
    '''simple docstring'''
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
return j
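# Project Euler 58: a number spiral of side length s has 2 * s - 1 diagonal
# values. For each new layer (side j + 2) the inner loop visits the three
# non-square corners j*j + j + 1, j*j + 2*j + 2 and j*j + 3*j + 3 (step j + 1);
# the fourth corner, (j + 2)**2, is a perfect square and never prime, so the
# exclusive range end skips it.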
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 31 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float, ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
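# This solves the mass action law n * p = n_i**2 for whichever quantity was
# passed as 0. Worked example: electron_conc=25, hole_conc=0, intrinsic_conc=5
# gives ("hole_conc", 5**2 / 25) == ("hole_conc", 1.0).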
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 31 | 1 |
def binary_multiply(a, b):
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res
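# Russian-peasant (shift-and-add) multiplication: for 3 * 5, b == 0b101, so res
# accumulates the doubled a at each set bit: 3 (bit 0) + 12 (bit 2) == 15. The
# modular variant below performs the same accumulation reduced mod c at each step.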
def binary_mod_multiply(a, b, c):
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c

        a += a
        b >>= 1

    return res
| 36 |
from math import log2


def get_index_of_rightmost_set_bit(a: int) -> int:
    if not isinstance(a, int):
        raise TypeError("Input value must be a 'int' type")
    elif a < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if (a == 0) else int(log2(a & -a))
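# `a & -a` isolates the lowest set bit (two's-complement trick), so log2 of that
# power of two is the bit's index: 36 == 0b100100 -> 0b100 -> index 2.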
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
return batch
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCAmelCase : Union[str, Any] = {}
if data_args.train_file is not None:
lowerCAmelCase : Dict = data_args.train_file
if data_args.validation_file is not None:
lowerCAmelCase : List[str] = data_args.validation_file
lowerCAmelCase : Any = data_args.train_file.split('.' )[-1]
lowerCAmelCase : List[str] = load_dataset(
_UpperCAmelCase, data_files=_UpperCAmelCase, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
lowerCAmelCase : List[str] = load_dataset(
'swag', 'regular', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
lowerCAmelCase : str = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path ), config=_UpperCAmelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCAmelCase : str = [f"ending{i}" for i in range(4 )]
lowerCAmelCase : List[Any] = 'sent1'
lowerCAmelCase : Optional[int] = 'sent2'
if data_args.max_seq_length is None:
lowerCAmelCase : List[str] = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
lowerCAmelCase : List[Any] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCAmelCase : Optional[Any] = min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
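# A minimal sketch of the flatten/un-flatten trick used in preprocess_function
# above. Each example contributes four (context, context + ending) sentence
# pairs, which are tokenized as one flat batch and regrouped into chunks of
# four so each model input row holds all candidate endings. Names below are
# illustrative only, not part of the training script.
def _demo_flatten_unflatten():
    from itertools import chain

    first_sentences = [["ctx0"] * 4, ["ctx1"] * 4]  # repeated context per example
    second_sentences = [[f"end{i}{j}" for j in range(4)] for i in range(2)]
    flat_first = list(chain(*first_sentences))    # 8 sentences
    flat_second = list(chain(*second_sentences))  # 8 sentences
    # stand-in for the tokenizer: one feature list per sentence pair
    tokenized = {"input_ids": [[len(a), len(b)] for a, b in zip(flat_first, flat_second)]}
    # regroup into per-example chunks of 4, exactly as the return statement above does
    unflattened = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized.items()}
    assert len(unflattened["input_ids"]) == 2 and len(unflattened["input_ids"][0]) == 4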
| 323 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
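# A compact usage sketch of the invariant these tests exercise: any supported
# input type comes out resized and center-cropped to the same
# (batch, channels, crop_h, crop_w) shape. Shapes and values below are
# illustrative, and channel-dimension inference is assumed to handle the
# channels-first array.
def _demo_processor_shapes():
    processor = MobileNetV1ImageProcessor(
        size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
    )
    image = (np.random.rand(3, 30, 40) * 255).astype(np.uint8)  # channels-first array
    pixel_values = processor(image, return_tensors="pt").pixel_values
    assert tuple(pixel_values.shape) == (1, 3, 18, 18)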
| 323 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
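# Usage sketch: the template maps whatever column holds the raw text onto the
# canonical "text" field expected by the task (this runs inside the datasets
# package, since the relative imports above require it):
def _demo_column_mapping():
    template = LanguageModeling(text_column="content")
    assert template.column_mapping == {"content": "text"}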
| 304 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
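# Worked example for the scores used in main(): the depth-2 max layer reduces
# [90, 23, 6, 33, 21, 65, 123, 34423] to [90, 33, 65, 34423], the depth-1 min
# layer to [33, 65], and the root max picks 65.
def _demo_minimax():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)  # 3.0
    assert minimax(0, 0, True, scores, height) == 65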
| 304 | 1 |
'''simple docstring'''
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
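# Round-trip sketch for the tables above. Note this Baconian variant assigns
# "j" and "v" their own B-initial codes, so decoding is unambiguous.
assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
assert decode(encode("hello world")) == "hello world"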
| 183 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
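# Quick sanity checks (run from the repository root so the maths.* imports
# resolve): 6 = 2*3 has an even number of prime factors, 12 = 2*2*3 is not
# square-free, and 30 = 2*3*5 has an odd number.
assert [mobius(n) for n in (6, 12, 30)] == [1, 0, -1]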
| 183 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
A_ : List[Any] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
A_ : Union[str, Any] = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
A_ : Optional[Any] = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized=False,
        ignore_punct=False,
        support_zh_ja_chars=False,
        case_sensitive=False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 192 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> str:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
A_ : Tuple = tmp_path / "cache"
A_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[Any] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str:
A_ : List[Any] = tmp_path / "cache"
A_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : int = features.copy() if features else default_expected_features
A_ : str = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Union[str, Any] = ParquetDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Optional[Any]:
A_ : Dict = tmp_path / "cache"
A_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> List[str]:
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : int = parquet_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
A_ : Optional[int] = [parquet_path]
A_ : Optional[int] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Optional[int] = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_dataset(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=("train",) ) -> Tuple:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
A_ : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Optional[int]:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Union[str, Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __snake_case ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Tuple:
A_ : Optional[Any] = tmp_path / "cache"
A_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : List[str] = features.copy() if features else default_expected_features
A_ : Tuple = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Union[str, Any]:
if split:
A_ : Any = {split: parquet_path}
else:
A_ : Optional[Any] = "train"
A_ : str = {"train": parquet_path, "test": parquet_path}
A_ : Any = tmp_path / "cache"
A_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A_ : Dict = ParquetDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __snake_case ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ) -> Dict:
A_ : List[str] = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
A_ : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" )
A_ : Dict = pf.read()
assert dataset.data.table == output_table
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> List[Any]:
A_ : Tuple = str(shared_datadir / "test_image_rgb.jpg" )
A_ : int = {"image": [image_path]}
A_ : Optional[Any] = Features({"image": Image()} )
A_ : Union[str, Any] = Dataset.from_dict(_lowerCAmelCase , features=_lowerCAmelCase )
A_ : Tuple = ParquetDatasetWriter(_lowerCAmelCase , tmp_path / "foo.parquet" )
assert writer.write() > 0
A_ : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
A_ : int = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCAmelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ) -> Any:
assert get_writer_batch_size(_lowerCAmelCase ) == expected
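# A minimal write/read round trip mirroring the tests above; it is not
# collected by pytest (no test_ prefix) and the temporary paths are
# illustrative. The APIs used are exactly the ones exercised in this file.
def _demo_parquet_roundtrip(tmp_path):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [0, 1]})
    parquet_file = str(tmp_path / "demo.parquet")
    assert ParquetDatasetWriter(ds, parquet_file).write() > 0
    reloaded = ParquetDatasetReader(parquet_file, cache_dir=str(tmp_path / "cache")).read()
    assert reloaded.column_names == ["col_1", "col_2"]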
| 300 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}
__lowerCamelCase = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
__lowerCamelCase = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
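# Quick round trip for the JSON helpers above (the temp directory is
# illustrative):
def _demo_json_roundtrip(tmp_dir: str) -> None:
    vocab = {"<s>": 0, "</s>": 1}
    path = os.path.join(tmp_dir, "vocab.json")
    save_json(vocab, path)
    assert load_json(path) == vocab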
| 364 |
def palindromic_string(input_string: str) -> str:
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
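# Quick checks for the routine above: "abbbaba" interleaves to "a|b|b|b|a|b|a",
# whose widest palindromic window recovers "abbba".
assert palindromic_string("abbbaba") == "abbba"
assert palindromic_string("ababa") == "ababa"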
| 10 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def UpperCamelCase__ ( self) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase :str = MBartTokenizer(__lowercase , keep_accents=__lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Optional[Any] = MBartTokenizer(__lowercase , keep_accents=__lowercase)
__UpperCamelCase :Any = tokenizer.tokenize('''This is a test''')
self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__UpperCamelCase :str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__UpperCamelCase :Optional[Any] = tokenizer.convert_tokens_to_ids(__lowercase)
self.assertListEqual(
__lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__UpperCamelCase :str = tokenizer.convert_ids_to_tokens(__lowercase)
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def UpperCamelCase__ ( self) -> Any:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCamelCase :Union[str, Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
__UpperCamelCase :Tuple = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase)
__UpperCamelCase :Optional[Any] = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase)
__UpperCamelCase :Optional[int] = tempfile.mkdtemp()
__UpperCamelCase :Dict = tokenizer_r.save_pretrained(__lowercase)
__UpperCamelCase :Tuple = tokenizer_p.save_pretrained(__lowercase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
__UpperCamelCase :Optional[Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
self.assertSequenceEqual(__lowercase , __lowercase)
# Checks everything loads correctly in the same way
__UpperCamelCase :Any = tokenizer_r.from_pretrained(__lowercase)
__UpperCamelCase :str = tokenizer_p.from_pretrained(__lowercase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowercase , __lowercase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowercase)
# Save tokenizer rust, legacy_format=True
__UpperCamelCase :int = tempfile.mkdtemp()
__UpperCamelCase :Dict = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase)
__UpperCamelCase :str = tokenizer_p.save_pretrained(__lowercase)
# Checks it save with the same files
self.assertSequenceEqual(__lowercase , __lowercase)
# Checks everything loads correctly in the same way
__UpperCamelCase :Optional[int] = tokenizer_r.from_pretrained(__lowercase)
__UpperCamelCase :Tuple = tokenizer_p.from_pretrained(__lowercase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowercase , __lowercase))
shutil.rmtree(__lowercase)
# Save tokenizer rust, legacy_format=False
__UpperCamelCase :List[Any] = tempfile.mkdtemp()
__UpperCamelCase :List[Any] = tokenizer_r.save_pretrained(__lowercase , legacy_format=__lowercase)
__UpperCamelCase :Union[str, Any] = tokenizer_p.save_pretrained(__lowercase)
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
__UpperCamelCase :Tuple = tokenizer_r.from_pretrained(__lowercase)
__UpperCamelCase :Optional[int] = tokenizer_p.from_pretrained(__lowercase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowercase , __lowercase))
shutil.rmtree(__lowercase)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def UpperCamelCase__ ( self) -> Union[str, Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250_001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250_004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250_020)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :List[str] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowercase)
def UpperCamelCase__ ( self) -> Tuple:
self.assertIn(__lowercase , self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
__UpperCamelCase :Dict = self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase)
__UpperCamelCase :str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowercase)
self.assertEqual(__lowercase , __lowercase)
self.assertNotIn(self.tokenizer.eos_token , __lowercase)
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :List[Any] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __lowercase)
__UpperCamelCase :Tuple = 10
__UpperCamelCase :Dict = self.tokenizer(__lowercase , max_length=__lowercase , truncation=__lowercase).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , __lowercase)
self.assertEqual(len(__lowercase) , __lowercase)
def UpperCamelCase__ ( self) -> Any:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR''']) , [250_026, 250_001])
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Union[str, Any] = tempfile.mkdtemp()
__UpperCamelCase :Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowercase)
__UpperCamelCase :List[str] = MBartTokenizer.from_pretrained(__lowercase)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowercase)
@require_torch
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowercase , return_tensors='''pt''')
__UpperCamelCase :List[str] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=len(self.expected_src_tokens) , return_tensors='''pt''' , )
__UpperCamelCase :Any = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
self.assertIsInstance(__lowercase , __lowercase)
self.assertEqual((2, 14) , batch.input_ids.shape)
self.assertEqual((2, 14) , batch.attention_mask.shape)
__UpperCamelCase :int = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowercase)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE])
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Optional[Any] = self.tokenizer(self.src_text , padding=__lowercase , truncation=__lowercase , max_length=3 , return_tensors='''pt''')
__UpperCamelCase :Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=10 , return_tensors='''pt''')
__UpperCamelCase :Dict = targets["""input_ids"""]
__UpperCamelCase :int = shift_tokens_right(__lowercase , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Union[str, Any] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''')
self.assertEqual(
nested_simplify(__lowercase) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3_034, 2, 250_004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250_001,
} , )
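# A standalone sketch of MBart's shift_tokens_right, which the batch tests
# above rely on: the language-id token that ends `labels` is wrapped around to
# position 0 to form `decoder_input_ids`. Token values are illustrative
# (RO_CODE = 250020, pad_token_id = 1 for MBart).
def _demo_shift_tokens_right():
    import torch

    labels = torch.tensor([[10, 11, 2, RO_CODE]])  # [tok, tok, </s>, ro_RO]
    shifted = shift_tokens_right(labels, 1)
    assert shifted.tolist() == [[RO_CODE, 10, 11, 2]]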
| 43 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"\n        top_k (`int`, defaults to 5):\n            The number of predictions to return.\n        targets (`str` or `List[str]`, *optional*):\n            When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n            token will be used (with a warning, and that might be slower).\n\n    " , )
class FillMaskPipeline(Pipeline):
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if self.framework == "tf":
lowercase : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowercase : Optional[int] = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=snake_case )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Tuple = self.get_masked_index(snake_case )
lowercase : Dict = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" ,self.model.base_model_prefix ,f"No mask_token ({self.tokenizer.mask_token}) found on the input" ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if isinstance(snake_case ,snake_case ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ,**snake_case ):
'''simple docstring'''
if return_tensors is None:
lowercase : int = self.framework
lowercase : Optional[Any] = self.tokenizer(snake_case ,return_tensors=snake_case )
self.ensure_exactly_one_mask_token(snake_case )
return model_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Optional[int] = self.model(**snake_case )
lowercase : Tuple = model_inputs["""input_ids"""]
return model_outputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=5 ,snake_case=None ):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
lowercase : str = target_ids.shape[0]
lowercase : Optional[Any] = model_outputs["""input_ids"""][0]
lowercase : List[str] = model_outputs["""logits"""]
if self.framework == "tf":
lowercase : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowercase : Tuple = outputs.numpy()
lowercase : Tuple = outputs[0, masked_index, :]
lowercase : Any = stable_softmax(snake_case ,axis=-1 )
if target_ids is not None:
lowercase : Union[str, Any] = tf.gather_nd(tf.squeeze(snake_case ,0 ) ,target_ids.reshape(-1 ,1 ) )
lowercase : int = tf.expand_dims(snake_case ,0 )
lowercase : Tuple = tf.math.top_k(snake_case ,k=snake_case )
lowercase , lowercase : int = topk.values.numpy(), topk.indices.numpy()
else:
lowercase : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=snake_case ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowercase : Union[str, Any] = outputs[0, masked_index, :]
lowercase : Tuple = logits.softmax(dim=-1 )
if target_ids is not None:
lowercase : List[str] = probs[..., target_ids]
lowercase , lowercase : Union[str, Any] = probs.topk(snake_case )
lowercase : Any = []
lowercase : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() ,predictions.tolist() ) ):
lowercase : Dict = []
for v, p in zip(_values ,_predictions ):
# Copy is important since we're going to modify this array in place
lowercase : Dict = input_ids.numpy().copy()
if target_ids is not None:
lowercase : Union[str, Any] = target_ids[p].tolist()
lowercase : Tuple = p
# Filter padding out:
lowercase : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowercase : Tuple = self.tokenizer.decode(snake_case ,skip_special_tokens=snake_case )
lowercase : Optional[Any] = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(snake_case )
result.append(snake_case )
if single_mask:
return result[0]
return result
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ):
'''simple docstring'''
if isinstance(snake_case ,snake_case ):
lowercase : List[Any] = [targets]
try:
lowercase : List[str] = self.tokenizer.get_vocab()
except Exception:
lowercase : Any = {}
lowercase : Dict = []
for target in targets:
lowercase : Dict = vocab.get(snake_case ,snake_case )
if id_ is None:
lowercase : Optional[int] = self.tokenizer(
snake_case ,add_special_tokens=snake_case ,return_attention_mask=snake_case ,return_token_type_ids=snake_case ,max_length=1 ,truncation=snake_case ,)["""input_ids"""]
if len(snake_case ) == 0:
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
lowercase : Union[str, Any] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
lowercase : Optional[Any] = list(set(snake_case ) )
if len(snake_case ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
lowercase : Optional[Any] = np.array(snake_case )
return target_ids
def _SCREAMING_SNAKE_CASE ( self ,snake_case=None ,snake_case=None ):
'''simple docstring'''
lowercase : Dict = {}
if targets is not None:
lowercase : str = self.get_target_ids(snake_case ,snake_case )
lowercase : List[Any] = target_ids
if top_k is not None:
lowercase : List[str] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" ,self.model.base_model_prefix ,"""The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self ,snake_case ,*snake_case ,**snake_case ):
'''simple docstring'''
lowercase : Tuple = super().__call__(snake_case ,**snake_case )
if isinstance(snake_case ,snake_case ) and len(snake_case ) == 1:
return outputs[0]
return outputs
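# Typical use of this pipeline through the standard `pipeline` factory; the
# model name is just a common fill-mask checkpoint, not something this file
# depends on:
def _demo_fill_mask():
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")
    predictions = unmasker("Paris is the <mask> of France.", top_k=2)
    # each entry carries score / token / token_str / sequence, as assembled in
    # postprocess() above
    return [p["token_str"] for p in predictions]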
| 20 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
    print(solution())
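# Worked check: 4150 = 4**5 + 1**5 + 5**5 + 0**5 (1024 + 1 + 3125 + 0), so it
# survives the filter in solution().
assert digits_fifth_powers_sum(4150) == 4150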
| 257 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 257 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4)) | 97 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # (the word-to-entity, entity-to-word and entity-to-entity query projections
    # all start from the pretrained word-to-word query weights)
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face["luke." + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 123 | 0 |
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
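# A tiny illustration, assuming mean() above: (1 + 2 + 3) / 3 == 2.0.
assert mean([1, 2, 3]) == 2.0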
| 367 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 204 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class) | 233 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
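# Quick examples, assuming twin_prime() above:
#   twin_prime(3) -> 5   (3 and 5 are both prime)
#   twin_prime(4) -> -1  (4 is not prime)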
if __name__ == "__main__":
import doctest
doctest.testmod() | 233 | 1 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
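# Usage sketch (illustrative only; the checkpoint name is the safety checker
# shipped with Stable Diffusion, loaded here from its PyTorch weights):
#   checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
#       "CompVis/stable-diffusion-safety-checker", from_pt=True
#   )
#   has_nsfw = checker(clip_input, params=checker.params)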
| 356 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
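# For reference: the four non-trivial digit-cancelling fractions are 16/64,
# 19/95, 26/65 and 49/98; their product reduces to 1/100, hence the answer 100.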
| 98 | 0 |
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the smallest tail element that is >= v[i]
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
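# A quick illustration, assuming the function above: the longest strictly
# increasing subsequence of [2, 5, 3, 7, 11, 8, 10, 13, 6] is
# [2, 3, 7, 8, 10, 13], so the expected length is 6.
assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6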
| 257 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image) | 126 | 0 |
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
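# Small composition example, assuming nand_gate() above: NAND is functionally
# complete, e.g. AND(a, b) == NAND(NAND(a, b), NAND(a, b)).
def and_from_nand(input_1: int, input_2: int) -> int:
    intermediate = nand_gate(input_1, input_2)
    return nand_gate(intermediate, intermediate)


assert and_from_nand(1, 1) == 1 and and_from_nand(1, 0) == 0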
| 361 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node

        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 184 | 0 |
'''simple docstring'''
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 2 | """simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
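# Usage sketch (assuming a vocab.json like the one hosted for
# "alibaba-damo/mgp-str-base"): the tokenizer is character level, so
# tokenizer._tokenize("hello") yields ["h", "e", "l", "l", "o"].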
| 150 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
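# A short usage sketch, assuming the classes above and a transformers install:
# build a small config and inspect the ONNX input axes when past key values
# are exported (the sizes chosen here are arbitrary illustration values).
if __name__ == "__main__":
    cfg = CodeGenConfig(n_layer=2, n_head=4, n_embd=256)
    onnx_cfg = CodeGenOnnxConfig(cfg, use_past=True)
    print(onnx_cfg.inputs)  # input_ids, past key values, and attention_mask axes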
| 48 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
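# Illustration (hypothetical dict file): if dict.txt contains the lines
# "hello 123" and "world 99", create_vocab_dict returns
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.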
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
lowerCamelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
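# Editor's sketch of a typical invocation of this converter (script name and
# local paths are hypothetical; the flag names come from the argparse setup
# above):
#   python convert_wav2vec2_seq2seq.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.txt \
#       --pytorch_dump_folder_path ./wav2vec2-s2t-converted
# Note that --vocab_size must equal the number of lines in dict.txt plus the
# four special tokens (<s>, <pad>, </s>, <unk>) prepended by create_vocab_dict.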
| 48 | 1 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
UpperCAmelCase__ = numpy.array([0, 0])
UpperCAmelCase__ = numpy.array([0.5, 0.866_0254])
UpperCAmelCase__ = numpy.array([1, 0])
UpperCAmelCase__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def _a ( a :list[numpy.ndarray] , a :int ) -> list[numpy.ndarray]:
a = initial_vectors
for _ in range(a ):
a = iteration_step(a )
return vectors
def _a ( a :list[numpy.ndarray] ) -> list[numpy.ndarray]:
a = []
for i, start_vector in enumerate(vectors[:-1] ):
a = vectors[i + 1]
new_vectors.append(a )
a = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def _a ( a :numpy.ndarray , a :float ) -> numpy.ndarray:
a = numpy.radians(a )
a , a = numpy.cos(a ), numpy.sin(a )
a = numpy.array(((c, -s), (s, c)) )
return numpy.dot(a , a )
def _a ( a :list[numpy.ndarray] ) -> None:
a = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
a , a = zip(*a )
plt.plot(a , a )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
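# Editor's note (illustrative): each iteration_step call replaces every edge
# with four edges, so after n iterations the closed triangle of 3 edges has
# 3 * 4**n edges. For example, iterate(INITIAL_VECTORS, 1) returns 13 points
# (12 edges plus the repeated closing point).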
| 0 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
__lowercase = TOKENIZER_CLASSES
else:
__lowercase = {tokenizer_name: getattr(SCREAMING_SNAKE_CASE , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
__lowercase = TOKENIZER_CLASSES[tokenizer_name]
__lowercase = True
if checkpoint_name is None:
__lowercase = list(tokenizer_class.max_model_input_sizes.keys() )
else:
__lowercase = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
__lowercase = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
__lowercase , __lowercase = checkpoint.split('/' )
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif add_prefix:
__lowercase = checkpoint
__lowercase = dump_path
else:
__lowercase = None
__lowercase = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__lowercase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__lowercase = file_path.split(SCREAMING_SNAKE_CASE )[-1][0]
if next_char == "/":
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
__lowercase = tokenizer.save_pretrained(
SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE , filename_prefix=SCREAMING_SNAKE_CASE )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(SCREAMING_SNAKE_CASE )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
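# Editor's sketch of a typical invocation (dump path hypothetical; the flags
# come from the argparse setup above, and the tokenizer name must be a key of
# SLOW_TO_FAST_CONVERTERS):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast-tokenizers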
| 325 | 0 |
def lowerCamelCase_ ( lowerCAmelCase: list[int] , lowerCAmelCase: str )-> list[int]:
_snake_case : Dict = int(lowerCAmelCase )
# Initialize the result list
_snake_case : Dict = []
# Traverse through all denominations, largest first
for denomination in reversed(lowerCAmelCase ):
# Take this denomination as many times as it still fits
while int(lowerCAmelCase ) >= int(lowerCAmelCase ):
total_value -= int(lowerCAmelCase )
answer.append(lowerCAmelCase ) # record this denomination in the answer
return answer
# Driver Code
if __name__ == "__main__":
lowerCAmelCase_ = []
lowerCAmelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
lowerCAmelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
lowerCAmelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase_ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
lowerCAmelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
lowerCAmelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
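# Editor's example (illustrative): with the default Indian-currency list the
# greedy routine above gives, e.g.
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
# Greedy is optimal for this canonical coin system, though not for arbitrary
# denomination sets.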
| 260 |
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int )-> int:
while a != 0:
_snake_case , _snake_case : Optional[Any] = b % a, a
return b
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int )-> int:
if gcd(lowerCAmelCase , lowerCAmelCase ) != 1:
_snake_case : Any = F"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(lowerCAmelCase )
_snake_case , _snake_case , _snake_case : Optional[Any] = 1, 0, a
_snake_case , _snake_case , _snake_case : Optional[int] = 0, 1, m
while va != 0:
_snake_case : Dict = ua // va
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
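# Editor's sketch (not part of the source row): the identifier renaming in this
# dataset collapses the original u1/u2/u3 and v1/v2/v3 accumulators above into
# indistinguishable names, which hides the extended-Euclid update. A readable
# version of the same modular-inverse routine, assuming the standard algorithm:
def _mod_inverse_sketch(a: int, m: int) -> int:
    u1, u2, u3 = 1, 0, a  # invariant: u1 * a + u2 * m == u3
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        # the new v row is u - q * v; the new u row is the old v row
        v1, v2, v3, u1, u2, u3 = u1 - q * v1, u2 - q * v2, u3 - q * v3, v1, v2, v3
    if u3 != 1:  # u3 now holds gcd(a, m); no inverse unless it is 1
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    return u1 % m
# e.g. _mod_inverse_sketch(3, 11) == 4, since 3 * 4 == 12 == 1 (mod 11)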
| 260 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE : List[Any] = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : int = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
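# Editor's note (illustrative): with this _LazyModule registration, an import
# such as `from transformers.models.rag import RagTokenizer` resolves the
# tokenization_rag submodule only on first access, keeping the top-level
# `import transformers` cheap.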
| 31 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Any , A : Optional[int]=None , A : Tuple=None , *A : Tuple , **A : List[str] ):
super().__init__(*A , **A )
if config is None:
assert isinstance(self.model , A ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
_UpperCAmelCase : str = self.model.config
else:
_UpperCAmelCase : List[str] = config
_UpperCAmelCase : List[Any] = data_args
_UpperCAmelCase : str = self.config.tgt_vocab_size if isinstance(self.config , A ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
" padding.." )
if self.args.label_smoothing == 0:
_UpperCAmelCase : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_UpperCAmelCase : Dict = label_smoothed_nll_loss
def _A ( self : Tuple , A : int ):
if self.optimizer is None:
_UpperCAmelCase : Tuple = ["bias", "LayerNorm.weight"]
_UpperCAmelCase : str = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
_UpperCAmelCase : int = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_UpperCAmelCase : List[str] = Adafactor
_UpperCAmelCase : List[Any] = {"scale_parameter": False, "relative_step": False}
else:
_UpperCAmelCase : List[str] = AdamW
_UpperCAmelCase : List[str] = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
_UpperCAmelCase : List[Any] = self.args.learning_rate
if self.sharded_ddp:
_UpperCAmelCase : List[Any] = OSS(
params=A , optim=A , **A , )
else:
_UpperCAmelCase : Union[str, Any] = optimizer_cls(A , **A )
if self.lr_scheduler is None:
_UpperCAmelCase : List[str] = self._get_lr_scheduler(A )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def _A ( self : List[str] , A : Optional[int] ):
_UpperCAmelCase : List[str] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_UpperCAmelCase : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_UpperCAmelCase : str = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_UpperCAmelCase : str = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A )
return scheduler
def _A ( self : Tuple ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _A ( self : Any , A : Union[str, Any] , A : Union[str, Any] , A : List[Any] ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_UpperCAmelCase : List[str] = model(**A , use_cache=A )[0]
_UpperCAmelCase : int = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_UpperCAmelCase , _UpperCAmelCase : Any = model(**A , labels=A , use_cache=A )[:2]
else:
# compute label smoothed loss
_UpperCAmelCase : Optional[int] = model(**A , use_cache=A )[0]
_UpperCAmelCase : List[str] = torch.nn.functional.log_softmax(A , dim=-1 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.loss_fn(A , A , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _A ( self : List[str] , A : Optional[int] , A : Optional[int] ):
_UpperCAmelCase : Union[str, Any] = inputs.pop("labels" )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._compute_loss(A , A , A )
return loss
def _A ( self : List[str] , A : nn.Module , A : Dict[str, Union[torch.Tensor, Any]] , A : bool , A : Optional[List[str]] = None , ):
_UpperCAmelCase : List[str] = self._prepare_inputs(A )
_UpperCAmelCase : Dict = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_UpperCAmelCase : Dict = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **A , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : int = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
_UpperCAmelCase : Any = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
_UpperCAmelCase , _UpperCAmelCase : str = self._compute_loss(A , A , A )
_UpperCAmelCase : List[str] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_UpperCAmelCase : str = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_UpperCAmelCase : Optional[Any] = self._pad_tensors_to_max_len(A , gen_kwargs["max_length"] )
return (loss, logits, labels)
def _A ( self : Dict , A : int , A : List[str] ):
# If the PAD token is not defined, at least the EOS token has to be defined
_UpperCAmelCase : Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
F""" padded to `max_length`={max_length}""" )
_UpperCAmelCase : Tuple = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_UpperCAmelCase : Tuple = tensor
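# Editor's note (assumption, based on the upstream implementation): the
# dataset's renaming dropped the slice target on the line above; the intended
# statement is `padded_tensor[:, : tensor.shape[-1]] = tensor`, i.e. copy the
# tensor into the left columns and leave the right columns as pad_token_id.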
return padded_tensor
| 31 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
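# Editor's example (illustrative): exactly one of the three quantities may be
# zero, and that is the one solved for; e.g. calling the function above with
# voltage=10, current=0, resistance=5 returns {"current": 2.0}.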
| 45 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = """efficientnet"""
def __init__( self : Tuple , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 600 , UpperCAmelCase : float = 2.0 , UpperCAmelCase : float = 3.1 , UpperCAmelCase : int = 8 , UpperCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCAmelCase : List[int] = [] , UpperCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase : float = 0.2_5 , UpperCAmelCase : str = "swish" , UpperCAmelCase : int = 2560 , UpperCAmelCase : str = "mean" , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : float = 0.0_0_1 , UpperCAmelCase : float = 0.9_9 , UpperCAmelCase : float = 0.5 , UpperCAmelCase : float = 0.2 , **UpperCAmelCase : int , ) -> Any:
super().__init__(**UpperCAmelCase )
lowerCamelCase__ : List[Any] = num_channels
lowerCamelCase__ : List[str] = image_size
lowerCamelCase__ : Union[str, Any] = width_coefficient
lowerCamelCase__ : Optional[Any] = depth_coefficient
lowerCamelCase__ : Union[str, Any] = depth_divisor
lowerCamelCase__ : Dict = kernel_sizes
lowerCamelCase__ : Union[str, Any] = in_channels
lowerCamelCase__ : Dict = out_channels
lowerCamelCase__ : Dict = depthwise_padding
lowerCamelCase__ : int = strides
lowerCamelCase__ : List[str] = num_block_repeats
lowerCamelCase__ : Optional[Any] = expand_ratios
lowerCamelCase__ : List[str] = squeeze_expansion_ratio
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : int = hidden_dim
lowerCamelCase__ : int = pooling_type
lowerCamelCase__ : Optional[Any] = initializer_range
lowerCamelCase__ : Any = batch_norm_eps
lowerCamelCase__ : List[Any] = batch_norm_momentum
lowerCamelCase__ : int = dropout_rate
lowerCamelCase__ : int = drop_connect_rate
lowerCamelCase__ : List[Any] = sum(UpperCAmelCase ) * 4
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = version.parse("""1.11""" )
@property
def A_ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A_ ( self : List[Any] ) -> float:
return 1e-5
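# Editor's sketch (assumption: the standard EfficientNet compound-scaling rule,
# which this config parameterizes but does not itself implement) of how
# width_coefficient and depth_divisor interact when scaling channel counts:
def _round_filters_sketch(num_channels: int, width_coefficient: float = 2.0, depth_divisor: int = 8) -> int:
    scaled = num_channels * width_coefficient
    # round to the nearest multiple of depth_divisor ...
    new_channels = max(depth_divisor, int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_channels < 0.9 * scaled:  # ... but never round down by more than 10%
        new_channels += depth_divisor
    return int(new_channels)
# e.g. _round_filters_sketch(32) == 64 for the b7 defaults above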
| 45 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
SCREAMING_SNAKE_CASE__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''The input training data file (a text file).'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
if self.train_file is not None:
SCREAMING_SNAKE_CASE : str = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
SCREAMING_SNAKE_CASE : str = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
def __call__( self : str , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = """label""" if """label""" in features[0].keys() else """labels"""
SCREAMING_SNAKE_CASE : Tuple = [feature.pop(lowerCamelCase_ ) for feature in features]
SCREAMING_SNAKE_CASE : Tuple = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = len(features[0]["""input_ids"""] )
SCREAMING_SNAKE_CASE : Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(lowerCamelCase_ )] for feature in features
]
SCREAMING_SNAKE_CASE : int = list(chain(*lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : str = self.tokenizer.pad(
lowerCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
SCREAMING_SNAKE_CASE : Optional[int] = {k: v.view(lowerCamelCase_ , lowerCamelCase_ , -1 ) for k, v in batch.items()}
# Add back labels
SCREAMING_SNAKE_CASE : str = torch.tensor(lowerCamelCase_ , dtype=torch.intaa )
return batch
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase_ , lowerCamelCase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
datasets.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
SCREAMING_SNAKE_CASE : Tuple = {}
if data_args.train_file is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
SCREAMING_SNAKE_CASE : Any = data_args.validation_file
SCREAMING_SNAKE_CASE : Optional[Any] = data_args.train_file.split(""".""" )[-1]
SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset(
lowerCamelCase_ , data_files=lowerCamelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
SCREAMING_SNAKE_CASE : Union[str, Any] = [f'''ending{i}''' for i in range(4 )]
SCREAMING_SNAKE_CASE : Dict = """sent1"""
SCREAMING_SNAKE_CASE : List[Any] = """sent2"""
if data_args.max_seq_length is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
SCREAMING_SNAKE_CASE : Any = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
SCREAMING_SNAKE_CASE : Optional[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Dict = [[context] * 4 for context in examples[context_name]]
SCREAMING_SNAKE_CASE : List[Any] = examples[question_header_name]
SCREAMING_SNAKE_CASE : List[str] = [
[f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase_ )
]
# Flatten out
SCREAMING_SNAKE_CASE : Tuple = list(chain(*lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = list(chain(*lowerCamelCase_ ) )
# Tokenize
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(
lowerCamelCase_ , lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
SCREAMING_SNAKE_CASE : Optional[int] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : List[str] = min(len(lowerCamelCase_ ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE : List[str] = train_dataset.select(range(lowerCamelCase_ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
SCREAMING_SNAKE_CASE : Optional[int] = train_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : Any = min(len(lowerCamelCase_ ) , data_args.max_eval_samples )
SCREAMING_SNAKE_CASE : List[Any] = eval_dataset.select(range(lowerCamelCase_ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
SCREAMING_SNAKE_CASE : List[str] = eval_dataset.map(
lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
SCREAMING_SNAKE_CASE : Tuple = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = eval_predictions
SCREAMING_SNAKE_CASE : Any = np.argmax(lowerCamelCase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Tuple = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase_ , data_collator=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : int = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : List[str] = last_checkpoint
SCREAMING_SNAKE_CASE : Any = trainer.train(resume_from_checkpoint=lowerCamelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
SCREAMING_SNAKE_CASE : Dict = train_result.metrics
SCREAMING_SNAKE_CASE : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase_ )
)
SCREAMING_SNAKE_CASE : str = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics("""train""" , lowerCamelCase_ )
trainer.save_metrics("""train""" , lowerCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE : Optional[int] = trainer.evaluate()
SCREAMING_SNAKE_CASE : Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = min(lowerCamelCase_ , len(lowerCamelCase_ ) )
trainer.log_metrics("""eval""" , lowerCamelCase_ )
trainer.save_metrics("""eval""" , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase_ )
else:
trainer.create_model_card(**lowerCamelCase_ )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
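# Editor's sketch of a typical launch (output path hypothetical; the argument
# names assume the original run_swag field names, which the dataset's renaming
# has obscured in the dataclasses above):
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --do_train --do_eval \
#       --output_dir ./swag-out \
#       --per_device_train_batch_size 16 \
#       --learning_rate 5e-5 \
#       --num_train_epochs 3 \
#       --pad_to_max_length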
| 323 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
pass
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ )
import datasets
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
SCREAMING_SNAKE_CASE : Any = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , lowerCamelCase_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large"""
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
| 323 | 1 |
def snake_case__ ( SCREAMING_SNAKE_CASE_ : int = 1_000_000 ):
'''simple docstring'''
lowercase__ : List[str] = limit + 1
lowercase__ : Any = [0] * limit
for first_term in range(1 , SCREAMING_SNAKE_CASE_ ):
for n in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ : List[Any] = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x, y, z are positive integers
frequency[n] += 1 # so z > 0 and a > d; also 4d < a
lowercase__ : List[Any] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
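# Editor's note (worked derivation, assuming the Project Euler 135 setup this
# solution follows): for an arithmetic progression x > y > z with common
# difference d, i.e. x = y + d and z = y - d,
#   x**2 - y**2 - z**2 = (y + d)**2 - y**2 - (y - d)**2 = y * (4*d - y).
# With y = first_term this means n = first_term * (4*d - first_term), so n must
# be a multiple of first_term and first_term + n / first_term == 4*d -- exactly
# the divisible-by-4 test above. z > 0 gives first_term > d, and n > 0 gives
# first_term < 4*d, matching the bounds checked in the loop.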
if __name__ == "__main__":
print(F'''{solution() = }''')
| 350 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
snake_case_ = logging.get_logger(__name__)
snake_case_ = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
snake_case_ = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
snake_case_ = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
snake_case_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
snake_case_ = OrderedDict(
[
# Model for Image classification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
snake_case_ = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
snake_case_ = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
snake_case_ = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
snake_case_ = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
snake_case_ = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
snake_case_ = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
snake_case_ = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
snake_case_ = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
snake_case_ = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
snake_case_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
snake_case_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
snake_case_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
snake_case_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
snake_case_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
snake_case_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
snake_case_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
snake_case_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
snake_case_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
snake_case_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
snake_case_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
snake_case_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
snake_case_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
snake_case_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : str = FLAX_MODEL_MAPPING
snake_case_ = auto_class_update(FlaxAutoModel)
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING
snake_case_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
snake_case_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : Optional[int] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
snake_case_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : Optional[int] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
snake_case_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : Dict = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
snake_case_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : Tuple = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
snake_case_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
snake_case_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : str = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
snake_case_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : Union[str, Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
snake_case_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : Union[str, Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
snake_case_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : Tuple = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
snake_case_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class SCREAMING_SNAKE_CASE__ (_BaseAutoModelClass ):
__lowerCamelCase : List[str] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
snake_case_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
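# Editor's example (illustrative): with the mappings above,
# FlaxAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
# resolves DistilBertConfig to FlaxDistilBertForSequenceClassification before
# loading the weights.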
| 216 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_SCREAMING_SNAKE_CASE : Tuple = ['''small''', '''medium''', '''large''']
_SCREAMING_SNAKE_CASE : Optional[Any] = '''lm_head.decoder.weight'''
_SCREAMING_SNAKE_CASE : Any = '''lm_head.weight'''
def lowerCamelCase__ ( _lowerCamelCase : str , _lowerCamelCase : str ) -> List[str]:
lowerCamelCase_ = torch.load(_lowerCamelCase )
lowerCamelCase_ = d.pop(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
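# Editor's note (assumption, based on the two key constants above and the
# upstream DialoGPT conversion script): the dataset's renaming collapsed the
# key remap in this function; the intended body is
#   d = torch.load(checkpoint_path)
#   d["lm_head.weight"] = d.pop("lm_head.decoder.weight")
# before the state dict is saved under WEIGHTS_NAME.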
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
_SCREAMING_SNAKE_CASE : str = F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 183 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_SCREAMING_SNAKE_CASE : Tuple = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_SCREAMING_SNAKE_CASE : Tuple = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_SCREAMING_SNAKE_CASE : int = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            # newer NLTK versions require pre-tokenized inputs
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 183 | 1 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: sort items by value/weight ratio, take whole items
    until only a fraction of the next best item fits."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
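    # Editor's example (not in the original file): values [60, 100, 120], weights
    # [10, 20, 30], capacity 50 -> optimum 240.0, taking items 0 and 1 whole and
    # two thirds of item 2.
    best, parts = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert best == 240.0
    print(best, parts)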
| 114 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase : List[str] = {"UserAgent": UserAgent().random}
def _lowerCAmelCase ( _UpperCamelCase : str ) -> dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =script.contents[0]
_SCREAMING_SNAKE_CASE =json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Class to interact with an Instagram user's public profile page."""

    def __init__(self, username: str) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and extract the embedded user data."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def _lowerCAmelCase ( _UpperCamelCase : str = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_SCREAMING_SNAKE_CASE =InstagramUser(_UpperCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _UpperCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 114 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
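# Editor's sketch (not in the original file): the _LazyModule pattern defers heavy imports
# until an attribute is first touched. A stripped-down, hypothetical equivalent:
#
# import importlib, types
#
# class LazyModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
#
#     def __getattr__(self, attr):
#         module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#         return getattr(module, attr)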
| 37 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A BERT configuration extended with pruning/masking parameters."""

    model_type = "masked_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
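# Editor's example (not in the original file): instantiating the config; values are
# illustrative, not recommended settings.
#
# config = MaskedBertConfig(num_hidden_layers=6, pruning_method="topK", mask_scale=0.0)
# print(config.pruning_method)  # "topK"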
| 10 | 0 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""")
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""")
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""")
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""") as configuration_file:
UpperCAmelCase__ : Dict = json.load(_lowerCamelCase)
UpperCAmelCase__ : Dict = configuration["""lowercase_modelname"""]
UpperCAmelCase__ : Tuple = configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(f'''{directory}/configuration.json''')
UpperCAmelCase__ : Optional[int] = """PyTorch""" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase__ : Tuple = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase__ : List[Any] = """Flax""" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase__ : Optional[int] = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase)
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=_lowerCamelCase)
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w"""):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''')
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''')
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''')
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''')
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''')
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''')
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''')
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''')
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''')
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
| 283 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__A , __A , __A =False, False, False
@dataclass
class Audio:
    """Audio feature able to encode/decode audio data into arrays."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
def __call__( self):
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""")
return {
"bytes": Value("""binary"""),
"path": Value("""string"""),
}
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 283 | 1 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)  # black image

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)  # black image

        return images, nsfw_detected, watermark_detected
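# Editor's sketch (not in the original file): the checker takes CLIP pixel inputs plus the
# generated numpy images; shapes below are illustrative.
#
# checker = IFSafetyChecker(CLIPConfig())
# clip_input = torch.randn(1, 3, 224, 224)
# images = [np.random.rand(64, 64, 3)]
# images, nsfw, watermark = checker(clip_input, images)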
| 257 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 257 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16,
        image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0,
        initializer_range=1e-10, qkv_bias=True, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
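# Editor's example (not in the original file): composing a full config from sub-configs;
# the tiny layer counts are illustrative.
#
# vision = InstructBlipVisionConfig(num_hidden_layers=2)
# qformer = InstructBlipQFormerConfig(num_hidden_layers=2)
# text = CONFIG_MAPPING["opt"]()
# config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)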
| 216 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    config_parameters_to_change = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
    key_parameters_to_change = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
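    # Editor's example (not in the original script): the renaming rule applied to a dummy
    # parameter key ("time_steps" maps to "time_proj" in key_parameters_to_change).
    _param_key = "time_steps.weight"
    _renamed = ".".join([key_parameters_to_change["time_steps"]] + _param_key.split(".")[1:])
    assert _renamed == "time_proj.weight"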
| 216 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
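# Editor's example (not in the original file): pointing the config at a timm backbone;
# the model name and indices are illustrative.
#
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(2, 3, 4))
# print(config.out_indices)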
| 13 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
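    # Editor's example (not in the original script): `find_backend` on a typical guarded
    # import line from the diffusers __init__.
    assert find_backend("    if not (is_torch_available() and is_transformers_available()):") == "torch_and_transformers"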
| 204 | 0 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag to check if a key is an arrow key

KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 2_7,
"""up""": 6_5 + ARROW_KEY_FLAG,
"""down""": 6_6 + ARROW_KEY_FLAG,
"""right""": 6_7 + ARROW_KEY_FLAG,
"""left""": 6_8 + ARROW_KEY_FLAG,
"""mod_int""": 9_1,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 5_0,
"""delete""": 5_1,
"""pg_up""": 5_3,
"""pg_down""": 5_4,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(1_0):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 363 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 338 | 0 |
"""simple docstring"""
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power (W) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (var) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
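    # Editor's example (not in the original file): with apparent power 100 VA and power
    # factor 0.9, real power is 90.0 W and reactive power is 100 * sqrt(1 - 0.81) ≈ 43.59 var.
    print(real_power(100, 0.9))
    print(round(reactive_power(100, 0.9), 2))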
| 44 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer ( PreTrainedTokenizer ):
    """simple docstring"""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id' in str(x ) ), additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens' )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False ) if isinstance(pad_token, str ) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False ) if isinstance(eos_token, str ) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False ) if isinstance(unk_token, str ) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder )
        n = len(additional_special_tokens )
        for i, token in enumerate(additional_special_tokens ):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size( self ):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True )

        # normal case: some special tokens
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + [1]
        return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]

    def _add_eos_if_not_present( self, token_ids: List[int] ):
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None ):
        eos = [self.eos_token_id]

        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]

    def build_inputs_with_special_tokens( self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None ):
        token_ids_a = self._add_eos_if_not_present(token_ids_a )
        if token_ids_b is None:
            return token_ids_a
        else:
            token_ids_b = self._add_eos_if_not_present(token_ids_b )
            return token_ids_a + token_ids_b

    def _tokenize( self, text: str ):
        # one token per UTF-8 byte
        tokens = [chr(i ) for i in text.encode('utf-8' )]
        return tokens

    def _convert_token_to_id( self, token: str ):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token ) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token ) + self._num_special_tokens
        return token_id

    def _convert_id_to_token( self, index ):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens )
        return token

    def convert_tokens_to_string( self, tokens ):
        bstring = b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode('utf-8' )
            elif token in self.special_tokens_encoder:
                tok_string = token.encode('utf-8' )
            elif token in self.added_tokens_encoder:
                tok_string = token.encode('utf-8' )
            else:
                tok_string = bytes([ord(token )] )
            bstring += tok_string
        string = bstring.decode('utf-8', errors='ignore' )
        return string

    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ):
        return ()
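
# Hedged illustration of the byte-level scheme above (standalone, values are mine):
# every UTF-8 byte becomes one token, and ids are offset past the three special
# tokens <pad>=0, </s>=1, <unk>=2, so `ord(byte) + 3` is the id of a raw byte.
# [ord(c) + 3 for c in "hi"]  ->  [107, 108]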
| 98 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :List[Any] = XLNetTokenizer
UpperCAmelCase_ :str = XLNetTokenizerFast
UpperCAmelCase_ :Optional[Any] = True
UpperCAmelCase_ :List[Any] = True
def __lowerCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ :Optional[int] = XLNetTokenizer(__A , keep_accents=__A )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :List[Any] = """<s>"""
lowerCAmelCase_ :List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(__A ) , 1006 )
def __lowerCAmelCase ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[Any] = XLNetTokenizer(__A , keep_accents=__A )
lowerCAmelCase_ :int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [285, 46, 10, 170, 382] )
lowerCAmelCase_ :Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase_ :Any = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
lowerCAmelCase_ :List[str] = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[str] = XLNetTokenizer(__A , do_lower_case=__A )
lowerCAmelCase_ :Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = XLNetTokenizer(__A , do_lower_case=__A )
lowerCAmelCase_ :Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
lowerCAmelCase_ :Any = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
lowerCAmelCase_ :Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
lowerCAmelCase_ :int = tokenizer.build_inputs_with_special_tokens(__A )
lowerCAmelCase_ :Dict = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
# fmt: off
lowerCAmelCase_ :Union[str, Any] = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _snake_case ( main_process_only: bool = True, *args, **kwargs ):
    '''simple docstring'''
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        # show the bar only on the local main process, disable it everywhere else
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable )
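
# Hedged usage sketch (the iterable and desc are mine): under `accelerate launch`,
# only local rank 0 renders the bar; every other rank receives a disabled tqdm.
# for step in _snake_case(True, range(100), desc="steps"):
#     ...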
| 1 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line ( tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt" ):
    '''simple docstring'''
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding='max_length' if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch ( input_ids, pad_token_id, attention_mask=None, ):
    '''simple docstring'''
    # drop the columns that are populated exclusively by pad_token_id
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
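
# Worked mini-example (tensors are mine) of the column trimming above:
# ids = torch.tensor([[5, 7, 0, 0], [6, 8, 9, 0]])
# trim_batch(ids, pad_token_id=0)  ->  tensor([[5, 7, 0], [6, 8, 9]])
# (the last column is all pads, so it is dropped; column 2 survives because of the 9)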
class Seq2SeqDataset (Dataset ):
    '''simple docstring'''

    def __init__( self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '.source' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '.target' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__( self ):
        return len(self.src_lens )

    def __getitem__( self, index ) -> Dict[str, torch.Tensor]:
        index = index + 1 # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ), index ).rstrip('\n' )
        tgt_line = linecache.getline(str(self.tgt_file ), index ).rstrip('\n' )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer ) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, 'right' )
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, 'right' )

        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens( data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]

    def collate_fn( self, batch ) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x['input_ids'] for x in batch] )
        masks = torch.stack([x['attention_mask'] for x in batch] )
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks )
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
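
# Hedged usage sketch (paths and model are mine, not from the source):
# tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
# ds = Seq2SeqDataset(tokenizer, "path/to/data", max_source_length=128,
#                     max_target_length=64, type_path="val")
# loader = torch.utils.data.DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)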
logger = getLogger(__name__ )


def flatten_list ( nested_list: List[List] ):
    '''simple docstring'''
    return list(itertools.chain.from_iterable(nested_list ) )


def save_git_info ( folder_path: str ):
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, 'git_log.json' ) )


def save_json ( content, path, indent=4, **json_dump_kwargs ):
    '''simple docstring'''
    with open(path, 'w' ) as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs )


def load_json ( path ):
    '''simple docstring'''
    with open(path ) as f:
        return json.load(f )


def get_git_info ( ):
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos


def lmap ( f: Callable, x: Iterable ):
    '''simple docstring'''
    return list(map(f, x ) )


def pickle_save ( obj, path ):
    '''simple docstring'''
    with open(path, 'wb' ) as f:
        return pickle.dump(obj, f )


def normalize_answer ( s ):
    '''simple docstring'''

    def remove_articles(text ):
        return re.sub(R'\b(a|an|the)\b', ' ', text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )


def f1_score ( prediction, ground_truth ):
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
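
# Worked example (strings are mine): f1_score("the quick brown fox", "quick fox")
# normalizes to ["quick", "brown", "fox"] vs ["quick", "fox"]; num_same=2,
# precision=2/3, recall=1.0, so F1 = 2*(2/3)*1.0 / (2/3 + 1.0) = 0.8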
def exact_match_score ( prediction, ground_truth ):
    '''simple docstring'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )


def calculate_exact_match ( output_lns: List[str], reference_lns: List[str] ):
    '''simple docstring'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns, reference_lns ):
        em += exact_match_score(hypo, pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}


def is_rag_model ( model_prefix ):
    '''simple docstring'''
    return model_prefix.startswith('rag' )


def set_extra_model_params ( extra_params, hparams, config ):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams, p, None ):
            if not hasattr(config, p ) and not hasattr(config, equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams, p )
                continue
            set_p = p if hasattr(config, p ) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p ) )
            delattr(hparams, p )
    return hparams, config
| 29 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Optional[Any] = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = "xlm-roberta"

    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig ( OnnxConfig ):
    """simple docstring"""

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
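
# Hedged quick check (standalone sketch of the axes declared above):
# from collections import OrderedDict
# axes = {0: "batch", 1: "choice", 2: "sequence"}  # the multiple-choice case
# OrderedDict([("input_ids", axes), ("attention_mask", axes)])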
| 184 | 0 |
"""simple docstring"""
from __future__ import annotations

from collections.abc import Iterable
from typing import Any


class Node:
    """simple docstring"""

    def __init__(self, value = None) -> None:
        self.value = value
        self.parent: Node | None = None # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    """simple docstring"""

    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None: # reset its kids
            new_children.parent = node.parent
        if node.parent is not None: # reset its parent
            if self.is_right(node): # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value) # create a new Node
        if self.empty(): # if Tree is empty
            self.root = new_node # set its root
        else: # Tree is not empty
            parent_node = self.root # from root
            if parent_node is None:
                return
            while True: # While we don't get to a leaf
                if value < parent_node.value: # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError('Warning: Tree is empty! please use another.')
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value) # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None: # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None: # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None: # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left) # Gets the max value of the left branch
                self.remove(tmp_node.value) # type: ignore
                node.value = (
                    tmp_node.value # type: ignore
                ) # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function = None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list = []
        self.inorder(arr, node) # append all values to list using inorder traversal
        return arr[k - 1]


def postorder( curr_node: Node | None ) -> list[Node]:
    '''simple docstring'''
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list


def binary_search_tree_example( ) -> None:
    '''simple docstring'''
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )

    # Prints all the elements of the list in order traversal
    print(t )

    if t.search(6 ) is not None:
        print('The value 6 exists' )
    else:
        print('The value 6 doesn\'t exist' )

    if t.search(-1 ) is not None:
        print('The value -1 exists' )
    else:
        print('The value -1 doesn\'t exist' )

    if not t.empty():
        print('Max Value: ', t.get_max().value ) # type: ignore
        print('Min Value: ', t.get_min().value ) # type: ignore

    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
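    # Extra hedged check (values are mine): the 3rd smallest of the demo values
    # {1, 3, 4, 6, 7, 8, 10, 13, 14} is 4.
    t = BinarySearchTree()
    t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
    print(t.find_kth_smallest(3, t.root))  # -> 4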
| 365 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCamelCase ( unittest.TestCase ):
def _lowercase (self : Union[str, Any]) -> Optional[int]:
__snake_case : Optional[Any] = 0
def _lowercase (self : Tuple) -> int:
__snake_case : Optional[Any] = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
self.assertIsInstance(_A , _A)
def _lowercase (self : str) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[str] = Path(_A) / 'preprocessor_config.json'
__snake_case : Optional[Any] = Path(_A) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : Any) -> Optional[int]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Any = Path(_A) / 'preprocessor_config.json'
__snake_case : List[Any] = Path(_A) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Tuple = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : List[Any]) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : str = CLIPConfig()
# Create a dummy config file with image_proceesor_type
__snake_case : List[Any] = Path(_A) / 'preprocessor_config.json'
__snake_case : Optional[Any] = Path(_A) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__snake_case : List[str] = AutoImageProcessor.from_pretrained(_A).to_dict()
config_dict.pop('image_processor_type')
__snake_case : Optional[int] = CLIPImageProcessor(**_A)
# save in new folder
model_config.save_pretrained(_A)
config.save_pretrained(_A)
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(_A)
# make sure private variable is not incorrectly saved
__snake_case : int = json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(_A , _A)
def _lowercase (self : Union[str, Any]) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : int = Path(_A) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
__snake_case : List[str] = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : Optional[int]) -> Dict:
with self.assertRaisesRegex(
_A , 'clip-base is not a local folder and is not a valid model identifier'):
__snake_case : Tuple = AutoImageProcessor.from_pretrained('clip-base')
def _lowercase (self : str) -> int:
with self.assertRaisesRegex(
_A , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__snake_case : str = AutoImageProcessor.from_pretrained(_A , revision='aaaaaa')
def _lowercase (self : List[Any]) -> str:
with self.assertRaisesRegex(
_A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__snake_case : List[Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')
def _lowercase (self : Optional[int]) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_A):
__snake_case : Any = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A):
__snake_case : Tuple = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
__snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A)
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(_A , trust_remote_code=_A)
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor')
def _lowercase (self : int) -> Optional[int]:
try:
AutoConfig.register('custom' , _A)
AutoImageProcessor.register(_A , _A)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A):
AutoImageProcessor.register(_A , _A)
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Tuple = Path(_A) / 'preprocessor_config.json'
__snake_case : Dict = Path(_A) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Tuple = CustomImageProcessor.from_pretrained(_A)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A)
__snake_case : Tuple = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : List[Any]) -> Tuple:
class UpperCamelCase ( lowercase ):
UpperCAmelCase : str = True
try:
AutoConfig.register('custom' , _A)
AutoImageProcessor.register(_A , _A)
# If remote code is not set, the default is to use local
__snake_case : Tuple = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
__snake_case : List[Any] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(not hasattr(_A , 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 95 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate (
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path = None,
    generator_tokenizer_name_or_path = None,
    question_encoder_tokenizer_name_or_path = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config )
    rag_model.save_pretrained(dest_dir )

    # Sanity check.
    model_class.from_pretrained(dest_dir )

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
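
# Hedged CLI sketch (model identifiers are mine; the flags come from the parser below):
# python consolidate_rag_checkpoint.py --model_type rag_sequence \
#     --generator_name_or_path facebook/bart-large-cnn \
#     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#     --dest ./rag-checkpoint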
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 48 | 1 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_snake_case : List[str] = logging.getLogger(__name__)
def parse_args ( ):
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
    parser.add_argument(
        '--dataset_name', type=str, default='wikitext', help='Name of the training. Explore datasets at: hf.co/datasets.', )
    parser.add_argument(
        '--dataset_config', type=str, default='wikitext-103-raw-v1', help='Configuration name of the dataset.' )
    parser.add_argument(
        '--tokenizer_name_or_path', type=str, default='sayakpaul/unigram-tokenizer-wikitext', help='Tokenizer identifier. Can be a local filepath or a Hub identifier.', )
    parser.add_argument(
        '--shard_size', type=int, default=1000, help='Number of entries to go in a single shard.', )
    parser.add_argument('--split', type=str, default='train', choices=['train', 'test', 'validation'] )
    parser.add_argument(
        '--limit', default=None, type=int, help='Limit the number of shards (used for debugging).', )
    parser.add_argument(
        '--max_length', type=int, default=512, help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.', )
    parser.add_argument(
        '--output_dir', default='tf-tpu', type=str, help='Output directory where the TFRecord shards will be saved. If the'
        ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
        ' shards will be directly saved to a Google Cloud Storage bucket.', )
    args = parser.parse_args()
    return args
def tokenize_function ( tokenizer ):
    def fn(examples ):
        return tokenizer(examples['text'] )

    return fn
def get_serialized_examples ( tokenized_data ):
    records = []
    for i in range(len(tokenized_data['input_ids'] ) ):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i] ) ),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
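
# Hedged round-trip sketch (the feature spec is mine, mirroring the writer above):
# parsed = tf.io.parse_single_example(
#     serialized_record,
#     {"input_ids": tf.io.VarLenFeature(tf.int64),
#      "attention_mask": tf.io.VarLenFeature(tf.int64)},
# )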
def main ( args ):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )

    if args.limit is not None:
        max_samples = min(len(dataset ), args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F"""Limiting the dataset to {args.limit} entries.""" )

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir, args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir, args.split )

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['text'] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4 )

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset ), args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'] )
        filename = os.path.join(split_dir, F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        serialized_examples = get_serialized_examples(dataset_snapshot )

        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('Wrote file {} containing {} records'.format(filename, records_containing ) )

        shard_count += 1
        total_records += records_containing

    with open(F"""split-{args.split}-records-count.txt""", 'w' ) as f:
        print(F"""Total {args.split} records: {total_records}""", file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 207 |
from functools import lru_cache
@lru_cache
def factorial ( num: int ) -> int:
    if num < 0:
        raise ValueError('Number should not be negative.' )

    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
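    # Quick hedged check (mine): 5! = 5 * 4 * 3 * 2 * 1 = 120
    print(factorial(5))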
| 207 | 1 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level ( ):
    '''simple docstring'''
    env_level_str = os.getenv('''DATASETS_VERBOSITY''', None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'Unknown option DATASETS_VERBOSITY={env_level_str}, '
                f'has to be one of: { ", ".join(log_levels.keys() ) }' )
    return _default_log_level


def _get_library_name ( ):
    '''simple docstring'''
    return __name__.split('''.''' )[0]


def _get_library_root_logger ( ):
    '''simple docstring'''
    return logging.getLogger(_get_library_name() )


def _configure_library_root_logger ( ):
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )


def _reset_library_root_logger ( ):
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )


def get_logger ( name: Optional[str] = None ):
    '''simple docstring'''
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )


def get_verbosity ( ):
    '''simple docstring'''
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity ( verbosity: int ):
    '''simple docstring'''
    _get_library_root_logger().setLevel(verbosity )


def set_verbosity_info ( ):
    '''simple docstring'''
    return set_verbosity(INFO )


def set_verbosity_warning ( ):
    '''simple docstring'''
    return set_verbosity(WARNING )


def set_verbosity_debug ( ):
    '''simple docstring'''
    return set_verbosity(DEBUG )


def set_verbosity_error ( ):
    '''simple docstring'''
    return set_verbosity(ERROR )


def disable_propagation ( ):
    '''simple docstring'''
    _get_library_root_logger().propagate = False


def enable_propagation ( ):
    '''simple docstring'''
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
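
# Hedged usage sketch of the helpers above (the child logger name is mine):
# set_verbosity(INFO)
# get_logger("datasets.builder").info("visible now that the root level is INFO")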
class EmptyTqdm:
    """simple docstring"""

    def __init__( self, *args, **kwargs ): # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__( self ):
        return iter(self._iterator )

    def __getattr__( self, _ ):
        def empty_fn(*args, **kwargs ): # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__( self ):
        return self

    def __exit__( self, type_, value, traceback ):
        return


_tqdm_active = True


class _tqdm_cls:
    """simple docstring"""

    def __call__( self, *args, disable=False, **kwargs ):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs )
        else:
            return EmptyTqdm(*args, **kwargs )

    def set_lock( self, *args, **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs )

    def get_lock( self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled ( ):
    '''simple docstring'''
    global _tqdm_active
    return bool(_tqdm_active )


def enable_progress_bar ( ):
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar ( ):
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = False
| 260 |
"""simple docstring"""
def gnome_sort ( lst: list ):
    '''simple docstring'''
    if len(lst ) <= 1:
        return lst

    i = 1

    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst
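

# Quick hedged check (my example): gnome_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]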
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 260 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self, vocab_size=5_04_00, n_positions=20_48, n_embd=40_96, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=5_02_56, eos_token_id=5_02_56, tie_word_embeddings=False, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs )


class GPTJOnnxConfig ( OnnxConfigWithPast ):
    """simple docstring"""

    def __init__( self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        '''simple docstring'''
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past )
        if not getattr(self._config, """pad_token_id""", None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}

        return common_inputs

    @property
    def num_layers( self ) -> int:
        '''simple docstring'''
        return self._config.n_layer

    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self._config.n_head

    def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self ).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]

        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch, past_key_values_length, dtype=mask_dtype )], dim=1 )

        return ordered_inputs

    @property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 13
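
# Shape sketch for the dummy past above (numbers follow the config defaults):
# with use_past, each of the n_layer=28 (key, value) pairs is zeros of shape
# (batch, n_head=16, seqlen + 2, n_embd // n_head = 4096 // 16 = 256).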
| 66 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )

    @require_multi_gpu
    def test_multi_gpu( self ):
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )

    @require_multi_gpu
    def test_multi_gpu_ops( self ):
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
        print(f'''Command: {cmd}''' )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )

    @require_multi_gpu
    def test_pad_across_processes( self ):
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )

    @require_multi_gpu
    def test_distributed_data_loop( self ):
        print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='''0,1''' ):
            execute_subprocess_async(cmd, env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
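# How this file is exercised (added note): test_pad_across_processes above
# re-launches this very file under torchrun, e.g. on a 2-GPU host:
#   torchrun --nproc_per_node=2 test_multigpu.py
# (the file name is an assumption; accelerate must be installed).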
| 45 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
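# Added note: with the lazy module installed in sys.modules, importing names
# from this package stays cheap -- e.g.
#   from transformers.models.unispeech import UniSpeechConfig
# only triggers the real submodule import on first attribute access, and the
# torch-only model classes raise their ImportError lazily if torch is missing.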
| 45 | 1 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicitly specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use the pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicitly specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use the pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
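# Example invocation (added; the checkpoint names are common public ones, not
# taken from this file -- any compatible image encoder / causal LM pair works):
#   python create_model.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2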
| 82 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
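    # Worked example (added): 4 ohm and 12 ohm in parallel give
    # 1 / (1/4 + 1/12) = 3 ohm; the same pair in series sums to 16 ohm.
    print(resistor_parallel([4.0, 12.0]))  # 3.0
    print(resistor_series([4.0, 12.0]))  # 16.0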
| 82 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class a__ :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=2 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.0_2 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , _UpperCamelCase=1000 , ):
"""simple docstring"""
_lowercase : List[Any] = parent
_lowercase : List[str] = batch_size
_lowercase : List[str] = seq_length
_lowercase : str = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : str = use_token_type_ids
_lowercase : Optional[Any] = use_labels
_lowercase : int = vocab_size
_lowercase : Dict = hidden_size
_lowercase : Tuple = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : str = hidden_act
_lowercase : str = hidden_dropout_prob
_lowercase : Any = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : Union[str, Any] = type_sequence_label_size
_lowercase : List[Any] = initializer_range
_lowercase : Dict = num_labels
_lowercase : Optional[Any] = num_choices
_lowercase : int = scope
_lowercase : Dict = range_bbox
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
_lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : List[str] = bbox[i, j, 3]
_lowercase : Dict = bbox[i, j, 1]
_lowercase : List[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : List[Any] = bbox[i, j, 2]
_lowercase : List[Any] = bbox[i, j, 0]
_lowercase : List[Any] = t
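        # Note (added): LayoutLM expects bounding boxes normalized to a
        # 0-1000 coordinate space as (x0, y0, x1, y1) with x0 <= x1 and
        # y0 <= y1, which is exactly the invariant the swaps above enforce.
        # A typical normalization helper (an assumption, not from this file):
        #   def normalize_box(box, width, height):
        #       return [int(1000 * box[0] / width), int(1000 * box[1] / height),
        #               int(1000 * box[2] / width), int(1000 * box[3] / height)]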
_lowercase : int = tf.convert_to_tensor(snake_case_ )
_lowercase : int = None
if self.use_input_mask:
_lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : List[Any] = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Optional[Any] = None
_lowercase : Optional[Any] = None
_lowercase : str = None
if self.use_labels:
_lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : List[Any] = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Any = TFLayoutLMModel(config=snake_case_ )
_lowercase : str = model(snake_case_ , snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
_lowercase : str = model(snake_case_ , snake_case_ , token_type_ids=snake_case_ )
_lowercase : Union[str, Any] = model(snake_case_ , snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : List[str] = TFLayoutLMForMaskedLM(config=snake_case_ )
_lowercase : List[str] = model(snake_case_ , snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : List[str] = self.num_labels
_lowercase : Dict = TFLayoutLMForSequenceClassification(config=snake_case_ )
_lowercase : int = model(snake_case_ , snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : List[str] = self.num_labels
_lowercase : List[Any] = TFLayoutLMForTokenClassification(config=snake_case_ )
_lowercase : Any = model(snake_case_ , snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=snake_case_ )
_lowercase : Tuple = model(snake_case_ , snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class a__ ( __lowercase , __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Tuple = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_SCREAMING_SNAKE_CASE : List[str] = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Optional[int] = 10
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = TFLayoutLMModelTester(self )
_lowercase : Optional[Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Optional[Any] = TFLayoutLMModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@unittest.skip("Onnx compliancy broke with TF 2.10" )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _A ( ) -> str:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
_lowercase : List[str] = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
_lowercase : Union[str, Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_lowercase : str = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
_lowercase : Any = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_lowercase : str = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class a__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Tuple = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
_lowercase : Any = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : int = model(input_ids=snake_case_ , bbox=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
# test the sequence output on [0, :3, :3]
_lowercase : Optional[Any] = tf.convert_to_tensor(
[[0.1_7_8_5, -0.1_9_4_7, -0.0_4_2_5], [-0.3_2_5_4, -0.2_8_0_7, 0.2_5_5_3], [-0.5_3_9_1, -0.3_3_2_2, 0.3_3_6_4]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case_ , atol=1E-3 ) )
# test the pooled output on [1, :3]
_lowercase : int = tf.convert_to_tensor([-0.6_5_8_0, -0.0_2_1_4, 0.8_5_5_2] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case_ , atol=1E-3 ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
_lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Union[str, Any] = model(
input_ids=snake_case_ , bbox=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
_lowercase : Tuple = outputs.loss
_lowercase : Dict = (2,)
self.assertEqual(loss.shape , snake_case_ )
# test the shape of the logits
_lowercase : Dict = outputs.logits
_lowercase : List[str] = (2, 2)
self.assertEqual(logits.shape , snake_case_ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
_lowercase : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Optional[int] = model(
input_ids=snake_case_ , bbox=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
# test the shape of the logits
_lowercase : str = outputs.logits
_lowercase : List[str] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , snake_case_ )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
_lowercase : Dict = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : str = model(input_ids=snake_case_ , bbox=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
# test the shape of the logits
_lowercase : Optional[Any] = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , snake_case_ )
self.assertEqual(outputs.end_logits.shape , snake_case_ )
| 250 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
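# Added note: the slicing in read_in_q_k_v splits timm's fused qkv projection,
# a single (3 * hidden_size, hidden_size) matrix, into the three projections
# the HF ViT layout expects: rows [0, H) -> query, [H, 2H) -> key,
# [2H, 3H) -> value (H = config.hidden_size).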
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
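# Example run (added; the URL is the argparse default above, while the script
# and output folder names are arbitrary choices):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small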
| 216 | 0 |
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    """Compute the Schur complement of the block matrix [[A, B], [B.T, C]],
    i.e. C - B.T @ inv(A) @ B."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        # det([[A, B], [B.T, C]]) == det(A) * det(Schur complement)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
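# Added note: the `pseudo_inv` argument lets a caller supply a precomputed
# (pseudo-)inverse of A up front, e.g. for a singular A (generalized Schur
# complement): s = schur_complement(a, b, c, pseudo_inv=np.linalg.pinv(a))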
| 359 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
__magic_name__: int = AltDiffusionPipeline
__magic_name__: Any = TEXT_TO_IMAGE_PARAMS
__magic_name__: Any = TEXT_TO_IMAGE_BATCH_PARAMS
__magic_name__: Any = TEXT_TO_IMAGE_IMAGE_PARAMS
__magic_name__: Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self : List[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
snake_case_ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
snake_case_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
snake_case_ : Any = CLIPTextModel(_A )
snake_case_ : Any = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
snake_case_ : Dict = 77
snake_case_ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase_ ( self : int , _A : Optional[int] , _A : int=0 ) -> Dict:
"""simple docstring"""
if str(_A ).startswith('mps' ):
snake_case_ : Union[str, Any] = torch.manual_seed(_A )
else:
snake_case_ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
snake_case_ : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase_ ( self : Dict ) -> Any:
"""simple docstring"""
snake_case_ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ : Any = self.get_dummy_components()
torch.manual_seed(0 )
snake_case_ : Any = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ : Optional[Any] = RobertaSeriesModelWithTransformation(_A )
snake_case_ : Optional[Any] = text_encoder
snake_case_ : Optional[Any] = AltDiffusionPipeline(**_A )
snake_case_ : List[Any] = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
snake_case_ : Optional[Any] = self.get_dummy_inputs(_A )
snake_case_ : int = 'A photo of an astronaut'
snake_case_ : Tuple = alt_pipe(**_A )
snake_case_ : Any = output.images
snake_case_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ : Any = np.array(
[0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ : Any = self.get_dummy_components()
snake_case_ : List[str] = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
snake_case_ : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
snake_case_ : Tuple = RobertaSeriesModelWithTransformation(_A )
snake_case_ : Any = text_encoder
snake_case_ : Tuple = AltDiffusionPipeline(**_A )
snake_case_ : Dict = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
snake_case_ : Dict = self.get_dummy_inputs(_A )
snake_case_ : Tuple = alt_pipe(**_A )
snake_case_ : int = output.images
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ : Optional[int] = np.array(
[0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : int ) -> List[str]:
"""simple docstring"""
snake_case_ : Optional[int] = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=_A )
snake_case_ : Optional[int] = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
snake_case_ : str = 'A painting of a squirrel eating a burger'
snake_case_ : Tuple = torch.manual_seed(0 )
snake_case_ : str = alt_pipe([prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' )
snake_case_ : Any = output.images
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ : Union[str, Any] = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
snake_case_ : Union[str, Any] = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=_A , safety_checker=_A )
snake_case_ : List[str] = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
snake_case_ : List[Any] = 'A painting of a squirrel eating a burger'
snake_case_ : int = torch.manual_seed(0 )
snake_case_ : List[Any] = alt_pipe([prompt] , generator=_A , num_inference_steps=2 , output_type='numpy' )
snake_case_ : Any = output.images
snake_case_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ : List[Any] = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
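# Minimal inference sketch (added) for the pipeline exercised above; assumes a
# CUDA device and the public BAAI/AltDiffusion weights used by the slow tests:
#   pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
#   image = pipe("A painting of a squirrel eating a burger", num_inference_steps=20).images[0]
#   image.save("squirrel.png")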
| 88 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
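# Example (added): with random_slice == 2, crossover("ABCDEF", "uvwxyz")
# returns ("ABwxyz", "uvCDEF") -- each child takes a prefix from one parent
# and the suffix from the other.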
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}")

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 114 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
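# Worked example (added): for (1, 2, 3) and (4, 5, 6) the squared differences
# are 9 + 9 + 9 = 27, so both implementations return sqrt(27) ~= 5.196.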
if __name__ == "__main__":
    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10000, globals=globals()))
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])", number=10000, globals=globals()))
benchmark()
| 114 | 1 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=[1, 1, 2] , __lowerCamelCase=1 , __lowerCamelCase=3_2 , __lowerCamelCase=4 , __lowerCamelCase=8 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu_new" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=0.0 , __lowerCamelCase=5_1_2 , __lowerCamelCase=3 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , __lowerCamelCase=False , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = parent
_SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
_SCREAMING_SNAKE_CASE : Optional[Any] = seq_length
_SCREAMING_SNAKE_CASE : Any = is_training
_SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
_SCREAMING_SNAKE_CASE : List[str] = use_token_type_ids
_SCREAMING_SNAKE_CASE : List[str] = use_labels
_SCREAMING_SNAKE_CASE : str = vocab_size
_SCREAMING_SNAKE_CASE : Tuple = block_sizes
_SCREAMING_SNAKE_CASE : List[str] = num_decoder_layers
_SCREAMING_SNAKE_CASE : Tuple = d_model
_SCREAMING_SNAKE_CASE : str = n_head
_SCREAMING_SNAKE_CASE : str = d_head
_SCREAMING_SNAKE_CASE : Optional[int] = d_inner
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
_SCREAMING_SNAKE_CASE : Any = hidden_dropout
_SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout
_SCREAMING_SNAKE_CASE : Optional[Any] = activation_dropout
_SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
_SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size
_SCREAMING_SNAKE_CASE : List[Any] = 2
_SCREAMING_SNAKE_CASE : Tuple = num_labels
_SCREAMING_SNAKE_CASE : int = num_choices
_SCREAMING_SNAKE_CASE : Union[str, Any] = scope
_SCREAMING_SNAKE_CASE : List[str] = initializer_std
# Used in the tests to check the size of the first attention layer
_SCREAMING_SNAKE_CASE : Dict = n_head
# Used in the tests to check the size of the first hidden state
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
_SCREAMING_SNAKE_CASE : Union[str, Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
_SCREAMING_SNAKE_CASE : Any = self.num_hidden_layers + 2
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : str = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : Any = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : int = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> List[str]:
_SCREAMING_SNAKE_CASE : Tuple = TFFunnelModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : Optional[int] = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Dict = TFFunnelModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : Any = TFFunnelModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFFunnelBaseModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Dict = TFFunnelBaseModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : int = TFFunnelBaseModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Any:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFFunnelForPreTraining(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> str:
_SCREAMING_SNAKE_CASE : Dict = TFFunnelForMaskedLM(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Any = self.num_labels
_SCREAMING_SNAKE_CASE : Any = TFFunnelForSequenceClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Dict:
_SCREAMING_SNAKE_CASE : List[str] = self.num_choices
_SCREAMING_SNAKE_CASE : str = TFFunnelForMultipleChoice(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : Any = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : int = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
_SCREAMING_SNAKE_CASE : Any = TFFunnelForTokenClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : List[str] = TFFunnelForQuestionAnswering(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : Optional[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
        (
            (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) ,
        ) : Dict = config_and_inputs
_SCREAMING_SNAKE_CASE : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : List[str] = TFFunnelModelTester(self )
_SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
@require_tf
class lowerCAmelCase__( __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : List[str] = TFFunnelModelTester(self , base=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) | 358 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=5_1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=2 , __lowerCamelCase=0.02 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ) -> Any:
_SCREAMING_SNAKE_CASE : str = parent
_SCREAMING_SNAKE_CASE : List[Any] = 1_3
_SCREAMING_SNAKE_CASE : List[str] = 7
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : int = 9_9
_SCREAMING_SNAKE_CASE : str = 3_8_4
_SCREAMING_SNAKE_CASE : List[Any] = 2
_SCREAMING_SNAKE_CASE : Dict = 4
_SCREAMING_SNAKE_CASE : Dict = 3_7
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gelu"
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : str = 0.1
_SCREAMING_SNAKE_CASE : List[Any] = 5_1_2
_SCREAMING_SNAKE_CASE : Tuple = 1_6
_SCREAMING_SNAKE_CASE : Dict = 2
_SCREAMING_SNAKE_CASE : Any = 0.02
_SCREAMING_SNAKE_CASE : Any = 3
_SCREAMING_SNAKE_CASE : List[str] = 4
_SCREAMING_SNAKE_CASE : List[Any] = 1_2_8
_SCREAMING_SNAKE_CASE : Optional[int] = 2
_SCREAMING_SNAKE_CASE : int = 9
_SCREAMING_SNAKE_CASE : List[str] = 1
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = TFConvBertForMaskedLM(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.num_labels
_SCREAMING_SNAKE_CASE : str = TFConvBertForSequenceClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
_SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
_SCREAMING_SNAKE_CASE : List[Any] = TFConvBertForMultipleChoice(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Dict = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = TFConvBertForTokenClassification(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForQuestionAnswering(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
(
(
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) ,
) : List[Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = TFConvBertModelTester(self )
_SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
if hasattr(__lowerCamelCase , "use_cache" ):
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = os.path.join(__lowerCamelCase , "saved_model" , "1" )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : List[Any] = outputs["encoder_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = outputs["encoder_attentions"]
else:
_SCREAMING_SNAKE_CASE : List[str] = outputs["hidden_states"]
_SCREAMING_SNAKE_CASE : Dict = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Any = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
_SCREAMING_SNAKE_CASE : Any = getattr(self.model_tester , "key_length" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , "key_length" , __lowerCamelCase )
def check_decoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = len(__lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Any = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
_SCREAMING_SNAKE_CASE : Tuple = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_SCREAMING_SNAKE_CASE : Dict = True
_SCREAMING_SNAKE_CASE : List[Any] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
_SCREAMING_SNAKE_CASE : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE : str = model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) | 325 | 0 |
def lowercase_( SCREAMING_SNAKE_CASE_ = 1000 ):
    '''simple docstring'''
    num = 2**SCREAMING_SNAKE_CASE_
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
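# Quick sanity check, not part of the original snippet: 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26.
assert lowercase_(15) == 26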
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = lowercase_(power)
    print('''Sum of the digits is: ''', result)
| 283 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase_ ( PipelineTool ):
'''simple docstring'''
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]
def _snake_case ( self , __A ):
"""simple docstring"""
return self.pre_processor(__A , return_tensors="pt" ).input_features
def _snake_case ( self , __A ):
"""simple docstring"""
return self.model.generate(inputs=__A )
def _snake_case ( self , __A ):
"""simple docstring"""
        return self.pre_processor.batch_decode(__A , skip_special_tokens=True )[0]
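# Minimal usage sketch (assumed, not part of the original file): PipelineTool's
# __call__ chains encode -> forward -> decode, so the tool could be used roughly as:
#
#   tool = UpperCAmelCase_()       # obfuscated name for the transcription tool
#   text = tool(audio_array)       # `audio_array` is a hypothetical waveform input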
| 283 | 1 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
lowerCAmelCase_ : List[Any] = '''
import os
'''
lowerCAmelCase_ : Optional[Any] = '''
def foo():
import os
return False
'''
lowerCAmelCase_ : List[Any] = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
lowerCAmelCase_ : Any = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
lowerCAmelCase_ : List[Any] = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
lowerCAmelCase_ : Any = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
lowerCAmelCase_ : Optional[int] = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
lowerCAmelCase_ : Optional[Any] = '''
import os
try:
import bar
except:
raise ValueError()
'''
lowerCAmelCase_ : List[str] = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
lowerCAmelCase_ : Dict = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
lowerCAmelCase_ : Optional[Any] = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , a_ )
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = os.path.join(a_ , """test_file.py""" )
with open(a_ , """w""" ) as _tmp_file:
_tmp_file.write(a_ )
UpperCAmelCase = get_imports(a_ )
assert parsed_imports == ["os"]
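# Across every fixture above, only `os` should be reported: get_imports skips imports
# guarded by try/except, but still picks up `import os` even when it sits inside a
# function body.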
| 364 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ : Any = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class UpperCamelCase_ ( a_ ):
_A : List[str] = 'tapas'
def __init__( self , snake_case__=3_05_22 , snake_case__=7_68 , snake_case__=12 , snake_case__=12 , snake_case__=30_72 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10_24 , snake_case__=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__=10.0 , snake_case__=0 , snake_case__=1.0 , snake_case__=None , snake_case__=1.0 , snake_case__=False , snake_case__=None , snake_case__=1.0 , snake_case__=1.0 , snake_case__=False , snake_case__=False , snake_case__="ratio" , snake_case__=None , snake_case__=None , snake_case__=64 , snake_case__=32 , snake_case__=False , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=True , snake_case__=False , snake_case__=None , snake_case__=None , **snake_case__ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_sizes
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
# Fine-tuning task hyperparameters
UpperCAmelCase = positive_label_weight
UpperCAmelCase = num_aggregation_labels
UpperCAmelCase = aggregation_loss_weight
UpperCAmelCase = use_answer_as_supervision
UpperCAmelCase = answer_loss_importance
UpperCAmelCase = use_normalized_answer_loss
UpperCAmelCase = huber_loss_delta
UpperCAmelCase = temperature
UpperCAmelCase = aggregation_temperature
UpperCAmelCase = use_gumbel_for_cells
UpperCAmelCase = use_gumbel_for_aggregation
UpperCAmelCase = average_approximation_function
UpperCAmelCase = cell_selection_preference
UpperCAmelCase = answer_loss_cutoff
UpperCAmelCase = max_num_rows
UpperCAmelCase = max_num_columns
UpperCAmelCase = average_logits_per_cell
UpperCAmelCase = select_one_column
UpperCAmelCase = allow_empty_column_selection
UpperCAmelCase = init_cell_selection_weights_to_zero
UpperCAmelCase = reset_position_index_per_cell
UpperCAmelCase = disable_per_token_loss
# Aggregation hyperparameters
UpperCAmelCase = aggregation_labels
UpperCAmelCase = no_aggregation_label_index
if isinstance(self.aggregation_labels , snake_case__ ):
            UpperCAmelCase = {int(k ): v for k, v in aggregation_labels.items()}
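        # Hedged note: this mirrors TapasConfig, where int() restores integer keys for
        # `aggregation_labels` after a JSON round-trip (JSON object keys are strings).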
| 248 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
lowercase__ ='\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
lowercase__ ='\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
lowercase__ ='\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def __UpperCamelCase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] ):
return float((preds == labels).mean() )
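# E.g. in the deobfuscated original, simple_accuracy(preds, labels) with
# preds = np.array([0, 1, 1]) and labels = np.array([0, 1, 0]) returns 2/3.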
def __UpperCamelCase ( lowerCAmelCase__ : int , lowerCAmelCase__ : str ):
__a : Optional[int] = simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )
__a : Optional[int] = float(fa_score(y_true=lowerCAmelCase__ , y_pred=lowerCAmelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCamelCase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any ):
__a : Optional[Any] = np.array(lowerCAmelCase__ )
__a : Optional[Any] = np.array(lowerCAmelCase__ )
__a : Any = en_sentvecs.shape[0]
# mean centering
__a : Optional[int] = en_sentvecs - np.mean(lowerCAmelCase__ , axis=0 )
__a : Any = in_sentvecs - np.mean(lowerCAmelCase__ , axis=0 )
__a : Dict = cdist(lowerCAmelCase__ , lowerCAmelCase__ , '''cosine''' )
__a : int = np.array(range(lowerCAmelCase__ ) )
__a : List[Any] = sim.argsort(axis=1 )[:, :1_0]
__a : int = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
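# Reading of the function above: after mean-centering both sides, it ranks Indic
# sentence vectors by cosine distance to each English vector and returns the fraction
# of rows whose aligned pair lands among the 10 nearest neighbours, i.e. precision@10.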
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
def lowerCAmelCase (self : Tuple ):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def lowerCAmelCase (self : str , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(snake_case_ , snake_case_ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(snake_case_ , snake_case_ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(snake_case_ , snake_case_ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 216 |
def __UpperCamelCase ( n_term : str ):
    if n_term == "":
        return []
    series : list = []
    for temp in range(int(n_term ) ):
        series.append(f"1/{temp + 1}" if series else '''1''' )
    return series
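# Example: __UpperCamelCase("5") returns ['1', '1/2', '1/3', '1/4', '1/5'].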
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(__UpperCamelCase(nth_term))
| 216 | 1 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("""T""")
class __lowerCamelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self , UpperCAmelCase = True ) -> None:
'''simple docstring'''
        self.adj_list = {}  # dictionary of lists
        self.directed = UpperCAmelCase
    def A__ ( self , source_vertex , destination_vertex ) -> GraphAdjacencyList[T]:
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
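# Usage sketch (assumed, not part of the original file; `A__` is this file's
# obfuscated spelling of add_edge):
#
#   g = __lowerCamelCase()     # a directed graph, the default
#   g.A__(1, 2).A__(2, 3)      # chaining works because A__ returns self
#   print(g)                   # -> {1: [2], 2: [3], 3: []}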
| 297 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=23 , UpperCAmelCase=11 , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = act_dim
lowercase_ = state_dim
lowercase_ = hidden_size
lowercase_ = max_length
lowercase_ = is_training
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowercase_ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DecisionTransformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCAmelCase__ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = DecisionTransformerModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DecisionTransformerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = 2 # number of steps of autoregressive prediction we will perform
lowercase_ = 10 # defined by the RL environment, may be normalized
lowercase_ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
lowercase_ = model.to(UpperCAmelCase )
lowercase_ = model.config
torch.manual_seed(0 )
lowercase_ = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ) # env.reset()
lowercase_ = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=UpperCAmelCase )
lowercase_ = torch.tensor(UpperCAmelCase , device=UpperCAmelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowercase_ = state
lowercase_ = torch.zeros(1 , 0 , config.act_dim , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.zeros(1 , 0 , device=UpperCAmelCase , dtype=torch.floataa )
lowercase_ = torch.tensor(0 , device=UpperCAmelCase , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCAmelCase ):
lowercase_ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCAmelCase )] , dim=1 )
lowercase_ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase_ , lowercase_ , lowercase_ = model(
states=UpperCAmelCase , actions=UpperCAmelCase , rewards=UpperCAmelCase , returns_to_go=UpperCAmelCase , timesteps=UpperCAmelCase , attention_mask=UpperCAmelCase , return_dict=UpperCAmelCase , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=UpperCAmelCase , dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase_ = action_pred[0, -1]
lowercase_ = torch.cat([states, state] , dim=1 )
lowercase_ = returns_to_go[0, -1] - reward
lowercase_ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase_ = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCAmelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
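        # Hedged summary of the loop above: each step pads actions/rewards with zeros,
        # queries the model for an action prediction, asserts it matches the expected
        # output, then advances the state, return-to-go and timestep by one.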
| 297 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
A =logging.getLogger(__name__)
@dataclass
class _a :
__a : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__a : Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__a : bool = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__a : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__a : bool = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _a :
__a : Optional[str] = field(default=UpperCamelCase_ , metadata={"""help""": """The input training data file (a text file)."""} )
__a : Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__a : bool = field(
default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__a : Optional[int] = field(
default=UpperCamelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__a : Optional[int] = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__a : bool = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__a : Optional[int] = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__a : Optional[int] = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def A ( self : List[str] ):
'''simple docstring'''
if self.train_file is not None:
UpperCAmelCase = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCAmelCase = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _a :
__a : PreTrainedTokenizerBase
__a : Union[bool, str, PaddingStrategy] = True
__a : Optional[int] = None
__a : Optional[int] = None
def __call__( self : Dict , lowercase : str ):
'''simple docstring'''
UpperCAmelCase = '''label''' if '''label''' in features[0].keys() else '''labels'''
UpperCAmelCase = [feature.pop(__SCREAMING_SNAKE_CASE ) for feature in features]
UpperCAmelCase = len(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = len(features[0]['''input_ids'''] )
UpperCAmelCase = [
[{k: v[i] for k, v in feature.items()} for i in range(__SCREAMING_SNAKE_CASE )] for feature in features
]
UpperCAmelCase = list(chain(*__SCREAMING_SNAKE_CASE ) )
UpperCAmelCase = self.tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
UpperCAmelCase = {k: v.view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , -1 ) for k, v in batch.items()}
# Add back labels
UpperCAmelCase = torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.intaa )
return batch
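# Shape summary for the collator above (deobfuscated reading): B examples with 4
# choices each are flattened to 4*B feature dicts, padded once, viewed back as
# (B, 4, seq_len), and the popped labels return as a (B,) integer tensor.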
def snake_case_ ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
datasets.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCAmelCase = {}
if data_args.train_file is not None:
UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase = data_args.validation_file
UpperCAmelCase = data_args.train_file.split('''.''' )[-1]
UpperCAmelCase = load_dataset(
snake_case__ , data_files=snake_case__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCAmelCase = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCAmelCase = [F"ending{i}" for i in range(4 )]
UpperCAmelCase = '''sent1'''
UpperCAmelCase = '''sent2'''
if data_args.max_seq_length is None:
UpperCAmelCase = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
UpperCAmelCase = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_a : Tuple ):
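# Multiple-choice preprocessing: each example repeats its context once per candidate ending
# (4 for SWAG), is flattened for tokenization, then un-flattened back to (num_choices, seq_len) below.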
UpperCAmelCase = [[context] * 4 for context in examples[context_name]]
UpperCAmelCase = examples[question_header_name]
UpperCAmelCase = [
[F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(snake_case__ )
]
# Flatten out
UpperCAmelCase = list(chain(*snake_case__ ) )
UpperCAmelCase = list(chain(*snake_case__ ) )
# Tokenize
UpperCAmelCase = tokenizer(
snake_case__ , snake_case__ , truncation=snake_case__ , max_length=snake_case__ , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(snake_case__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
UpperCAmelCase = raw_datasets['''train''']
if data_args.max_train_samples is not None:
UpperCAmelCase = min(len(snake_case__ ) , data_args.max_train_samples )
UpperCAmelCase = train_dataset.select(range(snake_case__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
UpperCAmelCase = train_dataset.map(
snake_case__ , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
UpperCAmelCase = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
UpperCAmelCase = min(len(snake_case__ ) , data_args.max_eval_samples )
UpperCAmelCase = eval_dataset.select(range(snake_case__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
UpperCAmelCase = eval_dataset.map(
snake_case__ , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
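# With pad_to_max_length all features already share one shape, so the default collator suffices;
# otherwise pad dynamically per batch (to a multiple of 8 when fp16 is enabled, the flag renamed to `fpaa` here, for tensor cores).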
UpperCAmelCase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=snake_case__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_a : Dict ):
UpperCAmelCase , UpperCAmelCase = eval_predictions
UpperCAmelCase = np.argmax(snake_case__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=snake_case__ , data_collator=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase = train_result.metrics
UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case__ )
)
UpperCAmelCase = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics('''train''' , snake_case__ )
trainer.save_metrics('''train''' , snake_case__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case__ )
UpperCAmelCase = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics('''eval''' , snake_case__ )
trainer.save_metrics('''eval''' , snake_case__ )
UpperCAmelCase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
def snake_case_ (_a : List[str] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 34 |
lowercase__ : Optional[int] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def SCREAMING_SNAKE_CASE_ ( ) -> None:
lowerCAmelCase = input('''Enter message: ''' )
lowerCAmelCase = input('''Enter key [alphanumeric]: ''' )
lowerCAmelCase = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
lowerCAmelCase = '''encrypt'''
lowerCAmelCase = encrypt_message(snake_case__ , snake_case__ )
elif mode.lower().startswith('''d''' ):
lowerCAmelCase = '''decrypt'''
lowerCAmelCase = decrypt_message(snake_case__ , snake_case__ )
print(f"\n{mode.title()}ed message:" )
print(snake_case__ )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
return translate_message(snake_case__ , snake_case__ , '''encrypt''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
return translate_message(snake_case__ , snake_case__ , '''decrypt''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> str:
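# Vigenere cipher: shift each letter by the alphabet index of the current key letter
# (adding to encrypt, subtracting to decrypt) and cycle through the key as letters are consumed.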
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = key.upper()
for symbol in message:
lowerCAmelCase = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(snake_case__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(snake_case__ ):
lowerCAmelCase = 0
else:
translated.append(snake_case__ )
return "".join(snake_case__ )
if __name__ == "__main__":
main()
| 338 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def lowerCAmelCase__ ( a__: BertModel , a__: str , a__: str ) -> str:
'''simple docstring'''
_UpperCAmelCase = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
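# PyTorch nn.Linear stores weights as (out_features, in_features) while TF dense kernels are
# (in_features, out_features), so these tensors must be transposed during conversion.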
_UpperCAmelCase = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(a__ ):
os.makedirs(a__ )
_UpperCAmelCase = model.state_dict()
def to_tf_var_name(a__: str ):
for patt, repl in iter(a__ ):
_UpperCAmelCase = name.replace(a__ , a__ )
return F'''bert/{name}'''
def create_tf_var(a__: np.ndarray , a__: str , a__: tf.Session ):
_UpperCAmelCase = tf.dtypes.as_dtype(tensor.dtype )
_UpperCAmelCase = tf.get_variable(dtype=a__ , shape=tensor.shape , name=a__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(a__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_UpperCAmelCase = to_tf_var_name(a__ )
_UpperCAmelCase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
_UpperCAmelCase = torch_tensor.T
_UpperCAmelCase = create_tf_var(tensor=a__ , name=a__ , session=a__ )
tf.keras.backend.set_value(a__ , a__ )
_UpperCAmelCase = session.run(a__ )
print(F'''Successfully created {tf_name}: {np.allclose(a__ , a__ )}''' )
_UpperCAmelCase = tf.train.Saver(tf.trainable_variables() )
saver.save(a__ , os.path.join(a__ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def lowerCAmelCase__ ( a__: int=None ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=a__ , required=a__ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=a__ , default=a__ , required=a__ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=a__ , required=a__ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=a__ , required=a__ , help='Directory in which to save tensorflow model' )
_UpperCAmelCase = parser.parse_args(a__ )
_UpperCAmelCase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=a__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 185 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ :Tuple = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
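# _import_structure only lists the public names per submodule; _LazyModule (bottom of the file)
# defers the actual imports until an attribute is first accessed.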
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :List[Any] = ['''BeitFeatureExtractor''']
lowerCAmelCase__ :Optional[Any] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :int = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Tuple = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 185 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_: int =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Any = XLNetTokenizer
a__ : Tuple = XLNetTokenizerFast
a__ : int = True
a__ : Optional[int] = True
def _lowercase (self : Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = XLNetTokenizer(__a , keep_accents=__a )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self : Dict ):
UpperCAmelCase_ = "<s>"
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<eod>" )
self.assertEqual(len(__a ) , 1006 )
def _lowercase (self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = XLNetTokenizer(__a , keep_accents=__a )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [285, 46, 10, 170, 382] )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def _lowercase (self : Any ):
UpperCAmelCase_ = XLNetTokenizer(__a , do_lower_case=__a )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
def _lowercase (self : int ):
UpperCAmelCase_ = XLNetTokenizer(__a , do_lower_case=__a )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
def _lowercase (self : str ):
UpperCAmelCase_ = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def _lowercase (self : Optional[int] ):
# fmt: off
UpperCAmelCase_ = {"input_ids": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 1 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __A ( UpperCamelCase__ ):
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = 5
# Realm tokenizer
UpperCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def _lowercase (self : Optional[Any] ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def _lowercase (self : Any ):
shutil.rmtree(self.tmpdirname )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records )
return config
def _lowercase (self : List[str] ):
UpperCAmelCase_ = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def _lowercase (self : Any ):
UpperCAmelCase_ = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
] , dtype=__a , )
return block_records
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _lowercase (self : int ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
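# Block ids 0 and 3 select the "first" and "fourth" dummy records checked in the assertions below.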
UpperCAmelCase_ = np.array([0, 3] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(len(__a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" )
UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids
UpperCAmelCase_ = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
__a , __a , answer_ids=__a , max_length=__a , return_tensors="np" )
self.assertEqual([False, True, True] , __a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
UpperCAmelCase_ = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
| 1 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowercase :
# settable values
lowercase__ : Optional[int] = None
lowercase__ : Optional[jnp.ndarray] = None
lowercase__ : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def __snake_case( cls : int ) -> List[Any]:
'''simple docstring'''
return cls()
@dataclass
class lowercase ( a ):
lowercase__ : jnp.ndarray
lowercase__ : jnp.ndarray
lowercase__ : KarrasVeSchedulerState
class lowercase ( a , a ):
@property
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
return True
@register_to_config
def __init__( self : Union[str, Any] , _UpperCamelCase : float = 0.0_2 , _UpperCamelCase : float = 100 , _UpperCamelCase : float = 1.0_0_7 , _UpperCamelCase : float = 80 , _UpperCamelCase : float = 0.0_5 , _UpperCamelCase : float = 50 , ) -> Union[str, Any]:
'''simple docstring'''
pass
def __snake_case( self : int ) -> Union[str, Any]:
'''simple docstring'''
return KarrasVeSchedulerState.create()
def __snake_case( self : Optional[Any] , _UpperCamelCase : KarrasVeSchedulerState , _UpperCamelCase : int , _UpperCamelCase : Tuple = () ) -> KarrasVeSchedulerState:
'''simple docstring'''
SCREAMING_SNAKE_CASE = jnp.arange(0 , _UpperCamelCase )[::-1].copy()
SCREAMING_SNAKE_CASE = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=_UpperCamelCase , schedule=jnp.array(_UpperCamelCase , dtype=jnp.floataa ) , timesteps=_UpperCamelCase , )
def __snake_case( self : Tuple , _UpperCamelCase : KarrasVeSchedulerState , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : float , _UpperCamelCase : random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
'''simple docstring'''
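# Karras et al. stochastic churn: raise the noise level to sigma_hat = sigma + gamma * sigma
# (gamma is nonzero only when sigma lies in [s_min, s_max]) and add matching Gaussian noise to the sample.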
if self.config.s_min <= sigma <= self.config.s_max:
SCREAMING_SNAKE_CASE = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
SCREAMING_SNAKE_CASE = 0
# sample eps ~ N(0, S_noise^2 * I)
SCREAMING_SNAKE_CASE = random.split(_UpperCamelCase , num=1 )
SCREAMING_SNAKE_CASE = self.config.s_noise * random.normal(key=_UpperCamelCase , shape=sample.shape )
SCREAMING_SNAKE_CASE = sigma + gamma * sigma
SCREAMING_SNAKE_CASE = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __snake_case( self : Optional[Any] , _UpperCamelCase : KarrasVeSchedulerState , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = sample_hat + sigma_hat * model_output
SCREAMING_SNAKE_CASE = (sample_hat - pred_original_sample) / sigma_hat
SCREAMING_SNAKE_CASE = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_UpperCamelCase , derivative=_UpperCamelCase , state=_UpperCamelCase )
def __snake_case( self : List[str] , _UpperCamelCase : KarrasVeSchedulerState , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : jnp.ndarray , _UpperCamelCase : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
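# Second-order (Heun) correction: re-evaluate the derivative at sigma_prev and average it with
# the first-pass derivative before taking the step.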
SCREAMING_SNAKE_CASE = sample_prev + sigma_prev * model_output
SCREAMING_SNAKE_CASE = (sample_prev - pred_original_sample) / sigma_prev
SCREAMING_SNAKE_CASE = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=_UpperCamelCase , derivative=_UpperCamelCase , state=_UpperCamelCase )
def __snake_case( self : int , _UpperCamelCase : KarrasVeSchedulerState , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Dict ) -> int:
'''simple docstring'''
raise NotImplementedError()
| 206 |
from random import randint, random
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 5 , ):
SCREAMING_SNAKE_CASE = [[-1] * number_of_cells] # Create a highway without any car
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ , 0 )
while i < number_of_cells:
SCREAMING_SNAKE_CASE = (
randint(0 , UpperCAmelCase__ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def __lowerCamelCase (UpperCAmelCase__ : list , UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = highway_now[car_index + 1 :]
for cell in range(len(UpperCAmelCase__ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# If we get here the car is near the end of the highway: wrap around and keep searching from the start
return distance + get_distance(UpperCAmelCase__ , -1 )
def __lowerCamelCase (UpperCAmelCase__ : list , UpperCAmelCase__ : float , UpperCAmelCase__ : int ):
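# One update step of the (Nagel-Schreckenberg-style) traffic model: accelerate by 1 capped at max_speed,
# brake to keep a safe distance from the car ahead, then randomly slow down with the given probability.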
SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
# Before the calculation, the highway is empty
SCREAMING_SNAKE_CASE = [-1] * number_of_cells
for car_index in range(UpperCAmelCase__ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
SCREAMING_SNAKE_CASE = min(highway_now[car_index] + 1 , UpperCAmelCase__ )
# Number of empty cell before the next car
SCREAMING_SNAKE_CASE = get_distance(UpperCAmelCase__ , UpperCAmelCase__ ) - 1
# We can't have the car causing an accident
SCREAMING_SNAKE_CASE = min(next_highway[car_index] , UpperCAmelCase__ )
if random() < probability:
# Randomly, a driver will slow down
SCREAMING_SNAKE_CASE = max(next_highway[car_index] - 1 , 0 )
return next_highway
def __lowerCamelCase (UpperCAmelCase__ : list , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = len(highway[0] )
for i in range(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = update(highway[i] , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = [-1] * number_of_cells
for car_index in range(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
SCREAMING_SNAKE_CASE = (car_index + speed) % number_of_cells
# Commit the change of position
SCREAMING_SNAKE_CASE = speed
highway.append(UpperCAmelCase__ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 206 | 1 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def lowercase ( _snake_case : int="ro" , _snake_case : Dict="en" , _snake_case : int="wmt16" , _snake_case : List[str]=None ) ->None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
__snake_case : Union[str, Any] = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
__snake_case : Optional[Any] = datasets.load_dataset(_snake_case , _snake_case )
if save_dir is None:
__snake_case : int = f"""{dataset}-{pair}"""
__snake_case : Union[str, Any] = Path(_snake_case )
save_dir.mkdir(exist_ok=_snake_case )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__snake_case : Union[str, Any] = '''val''' if split == '''validation''' else split
__snake_case : List[str] = save_dir.joinpath(f"""{fn}.source""" )
__snake_case : int = save_dir.joinpath(f"""{fn}.target""" )
__snake_case : Union[str, Any] = src_path.open('''w+''' )
__snake_case : Union[str, Any] = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__snake_case : List[str] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 102 |
from math import pi
def _A ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 95 | 0 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
A__ = get_logger(__name__)
class a :
def __init__( self :Optional[int] ,__lowercase :Optional[str] = None ):
snake_case__ : int = (
os.path.join(a_ ,config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
snake_case__ : int = Extractor
def __lowerCamelCase ( self :Any ,__lowercase :str ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
snake_case__ : Dict = os.path.abspath(a_ )
return os.path.join(self.extract_dir ,hash_url_to_filename(a_ ) )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :str ,__lowercase :bool ):
return force_extract or (
not os.path.isfile(a_ ) and not (os.path.isdir(a_ ) and os.listdir(a_ ))
)
def __lowerCamelCase ( self :Tuple ,__lowercase :str ,__lowercase :bool = False ):
snake_case__ : Union[str, Any] = self.extractor.infer_extractor_format(a_ )
if not extractor_format:
return input_path
snake_case__ : List[str] = self._get_output_path(a_ )
if self._do_extract(a_ ,a_ ):
self.extractor.extract(a_ ,a_ ,a_ )
return output_path
class a ( SCREAMING_SNAKE_CASE__ ):
@classmethod
@abstractmethod
def __lowerCamelCase ( cls :Union[str, Any] ,__lowercase :Union[Path, str] ,**__lowercase :Tuple ):
...
@staticmethod
@abstractmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :Union[Path, str] ):
...
class a ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase : List[bytes] = []
@staticmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :int ):
with open(a_ ,'''rb''' ) as f:
return f.read(a_ )
@classmethod
def __lowerCamelCase ( cls :Any ,__lowercase :Union[Path, str] ,__lowercase :bytes = b"" ):
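# A file is considered extractable if its leading bytes start with any of the format's known magic numbers.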
if not magic_number:
snake_case__ : List[Any] = max(len(a_ ) for cls_magic_number in cls.magic_numbers )
try:
snake_case__ : Optional[Any] = cls.read_magic_number(a_ ,a_ )
except OSError:
return False
return any(magic_number.startswith(a_ ) for cls_magic_number in cls.magic_numbers )
class a ( SCREAMING_SNAKE_CASE__ ):
@classmethod
def __lowerCamelCase ( cls :Dict ,__lowercase :Union[Path, str] ,**__lowercase :List[Any] ):
return tarfile.is_tarfile(a_ )
@staticmethod
def __lowerCamelCase ( __lowercase :Optional[Any] ,__lowercase :int ):
def resolved(__lowercase :str ) -> str:
return os.path.realpath(os.path.abspath(a_ ) )
def badpath(__lowercase :str ,__lowercase :str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(a_ ,a_ ) ).startswith(a_ )
def badlink(__lowercase :Union[str, Any] ,__lowercase :str ) -> bool:
# Links are interpreted relative to the directory containing the link
snake_case__ : Dict = resolved(os.path.join(a_ ,os.path.dirname(info.name ) ) )
return badpath(info.linkname ,base=a_ )
snake_case__ : List[str] = resolved(a_ )
for finfo in members:
if badpath(finfo.name ,a_ ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(a_ ,a_ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(a_ ,a_ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :Union[Path, str] ):
os.makedirs(a_ ,exist_ok=a_ )
snake_case__ : Any = tarfile.open(a_ )
tar_file.extractall(a_ ,members=TarExtractor.safemembers(a_ ,a_ ) )
tar_file.close()
class a ( SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase : int = [B'\x1F\x8B']
@staticmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :Union[Path, str] ):
with gzip.open(a_ ,'''rb''' ) as gzip_file:
with open(a_ ,'''wb''' ) as extracted_file:
shutil.copyfileobj(a_ ,a_ )
class a ( SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase : Optional[Any] = [
B'PK\x03\x04',
B'PK\x05\x06', # empty archive
B'PK\x07\x08', # spanned archive
]
@classmethod
def __lowerCamelCase ( cls :Dict ,__lowercase :Union[Path, str] ,__lowercase :bytes = b"" ):
if super().is_extractable(a_ ,magic_number=a_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(a_ ,'''rb''' ) as fp:
snake_case__ : Optional[int] = _EndRecData(a_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
snake_case__ : Optional[Any] = fp.read(a_ ) # CD is where we expect it to be
if len(a_ ) == sizeCentralDir:
snake_case__ : Dict = struct.unpack(a_ ,a_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :Union[Path, str] ):
os.makedirs(a_ ,exist_ok=a_ )
with zipfile.ZipFile(a_ ,'''r''' ) as zip_file:
zip_file.extractall(a_ )
zip_file.close()
class a ( SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase : str = [B'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :Union[Path, str] ):
with lzma.open(a_ ) as compressed_file:
with open(a_ ,'''wb''' ) as extracted_file:
shutil.copyfileobj(a_ ,a_ )
class a ( SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase : Optional[Any] = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(a_ ,exist_ok=a_ )
snake_case__ : Any = rarfile.RarFile(a_ )
rf.extractall(a_ )
rf.close()
class a ( SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase : Optional[Any] = [B'\x28\xb5\x2F\xFD']
@staticmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
snake_case__ : Optional[int] = zstd.ZstdDecompressor()
with open(a_ ,'''rb''' ) as ifh, open(a_ ,'''wb''' ) as ofh:
dctx.copy_stream(a_ ,a_ )
class a ( SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase : str = [B'\x42\x5A\x68']
@staticmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :Union[Path, str] ):
with bza.open(a_ ,'''rb''' ) as compressed_file:
with open(a_ ,'''wb''' ) as extracted_file:
shutil.copyfileobj(a_ ,a_ )
class a ( SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase : List[Any] = [B'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import pyazr
os.makedirs(a_ ,exist_ok=a_ )
with pyazr.SevenZipFile(a_ ,'''r''' ) as archive:
archive.extractall(a_ )
class a ( SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase : Union[str, Any] = [B'\x04\x22\x4D\x18']
@staticmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lza.frame
with lza.frame.open(a_ ,'''rb''' ) as compressed_file:
with open(a_ ,'''wb''' ) as extracted_file:
shutil.copyfileobj(a_ ,a_ )
class a :
# Keep the zip extractor last: other formats (e.g. tar or gzip archives) can be wrongly detected as zip
__lowerCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __lowerCamelCase ( cls :List[str] ):
return max(
len(a_ )
for extractor in cls.extractors.values()
if issubclass(a_ ,a_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __lowerCamelCase ( __lowercase :Union[Path, str] ,__lowercase :int ):
try:
return MagicNumberBaseExtractor.read_magic_number(a_ ,magic_number_length=a_ )
except OSError:
return b""
@classmethod
def __lowerCamelCase ( cls :Tuple ,__lowercase :Union[Path, str] ,__lowercase :bool = False ):
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' ,category=a_ ,)
snake_case__ : Any = cls.infer_extractor_format(a_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __lowerCamelCase ( cls :List[Any] ,__lowercase :Union[Path, str] ): # <Added version="2.4.0"/>
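# Read just enough leading bytes to cover the longest registered magic number, then return
# the first extractor format whose magic number matches.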
snake_case__ : str = cls._get_magic_number_max_length()
snake_case__ : List[str] = cls._read_magic_number(a_ ,a_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(a_ ,magic_number=a_ ):
return extractor_format
@classmethod
def __lowerCamelCase ( cls :int ,__lowercase :Union[Path, str] ,__lowercase :Union[Path, str] ,__lowercase :Optional[str] = None ,__lowercase :Optional[BaseExtractor] = "deprecated" ,):
os.makedirs(os.path.dirname(a_ ) ,exist_ok=a_ )
# Prevent parallel extractions
snake_case__ : int = str(Path(a_ ).with_suffix('''.lock''' ) )
with FileLock(a_ ):
shutil.rmtree(a_ ,ignore_errors=a_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(a_ ,a_ ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' ,category=a_ ,)
snake_case__ : Any = extractor if extractor != '''deprecated''' else extractor_format
else:
snake_case__ : Union[str, Any] = cls.extractors[extractor_format]
return extractor.extract(a_ ,a_ )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' ,category=a_ ,)
for extractor in cls.extractors.values():
if extractor.is_extractable(a_ ):
return extractor.extract(a_ ,a_ )
| 359 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class a :
def __init__( self :str ,__lowercase :Optional[Any] ,__lowercase :List[Any]=1_3 ,__lowercase :str=7 ,__lowercase :Dict=True ,__lowercase :Any=True ,__lowercase :str=True ,__lowercase :Any=True ,__lowercase :Tuple=9_9 ,__lowercase :List[str]=3_2 ,__lowercase :int=5 ,__lowercase :Union[str, Any]=4 ,__lowercase :List[str]=4 ,__lowercase :Any="gelu" ,__lowercase :Any=0.0 ,__lowercase :Tuple=0.1 ,__lowercase :str=True ,__lowercase :Tuple=5_1_2 ,__lowercase :Dict=1_6 ,__lowercase :Tuple=2 ,__lowercase :List[str]=0.02 ,__lowercase :Dict=3 ,__lowercase :Optional[int]=4 ,__lowercase :Tuple=None ,):
snake_case__ : Optional[int] = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : Optional[Any] = seq_length
snake_case__ : Tuple = is_training
snake_case__ : Optional[Any] = use_input_mask
snake_case__ : List[Any] = use_token_type_ids
snake_case__ : str = use_labels
snake_case__ : List[Any] = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = intermediate_multiple_size
snake_case__ : Tuple = hidden_act
snake_case__ : Optional[Any] = hidden_dropout
snake_case__ : str = attention_dropout
snake_case__ : List[str] = weight_tying
snake_case__ : Optional[Any] = max_position_embeddings
snake_case__ : Optional[int] = type_vocab_size
snake_case__ : str = type_sequence_label_size
snake_case__ : Dict = initializer_range
snake_case__ : int = num_labels
snake_case__ : int = num_choices
snake_case__ : int = scope
def __lowerCamelCase ( self :List[str] ):
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ : str = None
if self.use_input_mask:
snake_case__ : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case__ : Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCamelCase ( self :int ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_multiple_size=self.intermediate_multiple_size ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,weight_tying=self.weight_tying ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self :str ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = self.prepare_config_and_inputs()
snake_case__ : Union[str, Any] = True
return config, input_ids, input_mask, token_labels
def __lowerCamelCase ( self :List[Any] ,__lowercase :Any ,__lowercase :Dict ,__lowercase :Optional[Any] ):
snake_case__ : Union[str, Any] = GPTNeoXJapaneseModel(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Union[str, Any] = model(__lowercase ,attention_mask=__lowercase )
snake_case__ : Optional[Any] = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Any ,__lowercase :Union[str, Any] ,__lowercase :Tuple ,__lowercase :Union[str, Any] ):
snake_case__ : Any = True
snake_case__ : Tuple = GPTNeoXJapaneseModel(__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : str = model(__lowercase ,attention_mask=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self :Any ,__lowercase :List[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ,__lowercase :Any ):
snake_case__ : Any = GPTNeoXJapaneseForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self :Optional[int] ,__lowercase :Any ,__lowercase :int ,__lowercase :List[str] ):
snake_case__ : Optional[int] = True
snake_case__ : Optional[int] = GPTNeoXJapaneseForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
# first forward pass
snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,use_cache=__lowercase )
snake_case__ : Dict = outputs.past_key_values
# create hypothetical next tokens and extend next_input_ids with them
snake_case__ : Optional[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
snake_case__ : int = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append the new tokens to the input_ids and the attention mask
snake_case__ : Optional[int] = torch.cat([input_ids, next_tokens] ,dim=-1 )
snake_case__ : Optional[int] = torch.cat([input_mask, next_mask] ,dim=-1 )
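# Running the model on the full concatenated sequence must produce the same hidden states as
# running it incrementally with the cached past_key_values (compared on a random slice below).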
snake_case__ : Dict = model(__lowercase ,attention_mask=__lowercase ,output_hidden_states=__lowercase )
snake_case__ : Tuple = output_from_no_past['''hidden_states'''][0]
snake_case__ : List[str] = model(
__lowercase ,attention_mask=__lowercase ,past_key_values=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0]
# select random slice
snake_case__ : Any = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
snake_case__ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-3 ) )
def __lowerCamelCase ( self :Dict ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = config_and_inputs
snake_case__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
__lowerCAmelCase : List[str] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
__lowerCAmelCase : int = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : str = False
def __lowerCamelCase ( self :Any ):
snake_case__ : int = GPTNeoXJapaneseModelTester(self )
snake_case__ : Any = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 )
def __lowerCamelCase ( self :Any ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self :str ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__lowercase ,__lowercase ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__lowercase ,__lowercase ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
# This regression test was failing with PyTorch < 1.3
snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case__ : List[str] = None
self.model_tester.create_and_check_model_as_decoder(__lowercase ,__lowercase ,__lowercase )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowercase ,__lowercase ,__lowercase )
def __lowerCamelCase ( self :str ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__lowercase )
@slow
def __lowerCamelCase ( self :Dict ):
snake_case__ : str = '''abeja/gpt-neox-japanese-2.7b'''
snake_case__ : int = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
snake_case__ : Optional[int] = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
snake_case__ : Optional[int] = GPTNeoXJapaneseTokenizer.from_pretrained(__lowercase )
snake_case__ : Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(__lowercase )
snake_case__ : Optional[int] = []
for prompt in prompts:
snake_case__ : Dict = tokenizer(__lowercase ,return_tensors='''pt''' ).input_ids
snake_case__ : Union[str, Any] = model.generate(__lowercase ,max_length=5_0 )
snake_case__ : int = tokenizer.batch_decode(__lowercase ,skip_special_tokens=__lowercase )
predicted_outputs += generated_string
self.assertListEqual(__lowercase ,__lowercase )
| 44 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( A__ ,A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = StableDiffusionInstructPixaPixPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
lowercase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
lowercase__ = CLIPTextModel(lowerCamelCase )
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase__ ( self : Tuple, lowerCamelCase : int, lowerCamelCase : Optional[int]=0 ):
'''simple docstring'''
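# image_guidance_scale is specific to instruct-pix2pix style pipelines: it controls how strongly
# the output is conditioned on the input image, alongside the usual text guidance_scale.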
lowercase__ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
lowercase__ = image.cpu().permute(0, 2, 3, 1 )[0]
lowercase__ = Image.fromarray(np.uinta(lowerCamelCase ) ).convert('''RGB''' )
if str(lowerCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(lowerCamelCase )
else:
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = sd_pipe(**lowerCamelCase ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = '''french fries'''
lowercase__ = sd_pipe(**lowerCamelCase, negative_prompt=lowerCamelCase )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = [inputs['''prompt''']] * 2
lowercase__ = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
lowercase__ = torch.from_numpy(lowerCamelCase ).unsqueeze(0 ).to(lowerCamelCase )
lowercase__ = image / 2 + 0.5
lowercase__ = image.permute(0, 3, 1, 2 )
lowercase__ = image.repeat(2, 1, 1, 1 )
lowercase__ = sd_pipe(**lowerCamelCase ).images
lowercase__ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase__ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = EulerAncestralDiscreteScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''' )
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = sd_pipe(**lowerCamelCase ).images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = [round(lowerCamelCase, 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
lowercase__ = VaeImageProcessor(do_resize=lowerCamelCase, do_normalize=lowerCamelCase )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase, input_image_type='''pt''' ) )[0]
lowercase__ = components['''vae''']
lowercase__ = self.get_dummy_inputs_by_type(lowerCamelCase, input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase__ = vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase__ = pipe(**lowerCamelCase )[0]
lowercase__ = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCamelCase, 1E-4, '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[str]=0 ):
'''simple docstring'''
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
lowercase__ = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCamelCase )
lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCamelCase )
lowercase__ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase ).images
lowercase__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = 0
def callback_fn(lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor ) -> None:
lowercase__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase__ = False
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCamelCase, torch_dtype=torch.floataa )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
lowercase__ = self.get_inputs()
pipe(**lowerCamelCase, callback=lowerCamelCase, callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCamelCase, torch_dtype=torch.floataa )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__ = self.get_inputs()
lowercase__ = pipe(**lowerCamelCase )
lowercase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase__ = inputs['''image'''].resize((504, 504) )
lowercase__ = '''timbrooks/instruct-pix2pix'''
lowercase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCamelCase, safety_checker=lowerCamelCase, )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
lowercase__ = pipe(**lowerCamelCase )
lowercase__ = output.images[0]
lowercase__ = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase__ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 207 |
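(Illustrative aside, not part of the dataset row above.) The tests in the preceding block all follow the same regression pattern: slice a 3x3 corner out of the generated image and compare it against a stored reference within a tolerance. A minimal, self-contained sketch of that pattern, using placeholder arrays rather than real pipeline output:

import numpy as np

def assert_slice_close(image, expected_slice, tol=1e-3):
    # Bottom-right 3x3 patch of the last channel, as in the tests above.
    image_slice = image[0, -3:, -3:, -1]
    max_diff = np.abs(image_slice.flatten() - expected_slice).max()
    assert max_diff < tol, f"max abs diff {max_diff} exceeds {tol}"

# Placeholder usage; a real test would pass pipeline output and a recorded slice.
assert_slice_close(np.zeros((1, 32, 32, 3), dtype=np.float32), np.zeros(9, dtype=np.float32))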
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : str=14, lowerCamelCase : List[str]=7, lowerCamelCase : Dict=True, lowerCamelCase : List[str]=True, lowerCamelCase : Optional[Any]=True, lowerCamelCase : Optional[Any]=True, lowerCamelCase : Any=True, lowerCamelCase : List[str]=99, lowerCamelCase : Optional[Any]=32, lowerCamelCase : Optional[int]=5, lowerCamelCase : List[Any]=4, lowerCamelCase : List[Any]=37, lowerCamelCase : List[str]="gelu", lowerCamelCase : Any=0.1, lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : List[Any]=512, lowerCamelCase : List[str]=16, lowerCamelCase : Dict=2, lowerCamelCase : Union[str, Any]=0.02, lowerCamelCase : Optional[int]=3, lowerCamelCase : Dict=4, lowerCamelCase : List[Any]=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_token_type_ids
lowercase__ = use_input_mask
lowercase__ = use_labels
lowercase__ = use_mc_token_ids
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = self.vocab_size - 1
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ = None
if self.use_mc_token_ids:
lowercase__ = ids_tensor([self.batch_size, self.num_choices], self.seq_length )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = self.get_config()
lowercase__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any], *lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = CTRLModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
model(lowerCamelCase, token_type_ids=lowerCamelCase, head_mask=lowerCamelCase )
model(lowerCamelCase, token_type_ids=lowerCamelCase )
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ), config.n_layer )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int], *lowerCamelCase : Any ):
'''simple docstring'''
lowercase__ = CTRLLMHeadModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def lowercase__ ( self : List[str], lowerCamelCase : Tuple, lowerCamelCase : Optional[Any], lowerCamelCase : int, lowerCamelCase : Any, *lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = CTRLForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowercase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
lowercase__ = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : Dict, lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = CTRLModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, n_embd=37 )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
# clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@slow
def lowercase__ ( self : Any ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = CTRLModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Any ):
'''simple docstring'''
super().tearDown()
# clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(lowerCamelCase )
lowercase__ = torch.tensor(
[[11_859, 0, 1_611, 8]], dtype=torch.long, device=lowerCamelCase ) # Legal the president is
lowercase__ = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowercase__ = model.generate(lowerCamelCase, do_sample=lowerCamelCase )
self.assertListEqual(output_ids[0].tolist(), lowerCamelCase )
| 207 | 1 |
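(Illustrative aside.) The `ids_tensor` and `random_attention_mask` helpers used in the block above come from the transformers test utilities. A rough sketch of what such helpers plausibly do, written from scratch here as an assumption rather than the library's exact implementation:

import torch

def ids_tensor(shape, vocab_size):
    # Random token ids drawn uniformly from [0, vocab_size).
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

def random_attention_mask(shape):
    # Random 0/1 mask with at least one attended position per row so
    # no sequence ends up fully masked.
    mask = torch.randint(0, 2, tuple(shape), dtype=torch.long)
    mask[:, 0] = 1  # assumption: the real helper may pin a different position
    return mask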
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase__ :Optional[int] = False, False, False
@dataclass
class __a :
_a : Optional[int] = None
_a : str = True
_a : int = True
_a : Tuple = None
# Automatically constructed
_a : Optional[int] = 'dict'
_a : Tuple = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
_a : Union[str, Any] = field(default='Audio' , init=lowerCamelCase__ , repr=lowerCamelCase__ )
def __call__( self ) -> Optional[int]:
"""simple docstring"""
return self.pa_type
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return {"bytes": None, "path": value}
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_UpperCAmelCase = BytesIO()
sf.write(_SCREAMING_SNAKE_CASE , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
# To convert raw PCM bytes to WAV bytes, the sampling rate must be known
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
# If we already have the PCM bytes, there is no need to read the file again (just use them!)
_UpperCAmelCase = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
_UpperCAmelCase = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 32767
_UpperCAmelCase = BytesIO(bytes() )
sf.write(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> int:
"""simple docstring"""
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
_UpperCAmelCase = (value["path"], BytesIO(value['bytes'] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
_UpperCAmelCase = xsplitext(_SCREAMING_SNAKE_CASE )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ' )
if file is None:
_UpperCAmelCase = token_per_repo_id or {}
_UpperCAmelCase = path.split('::' )[-1]
try:
_UpperCAmelCase = string_to_dict(_SCREAMING_SNAKE_CASE , config.HUB_DATASETS_URL )["repo_id"]
_UpperCAmelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_UpperCAmelCase = None
with xopen(_SCREAMING_SNAKE_CASE , 'rb' , use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
_UpperCAmelCase = sf.read(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = sf.read(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = array.T
if self.mono:
_UpperCAmelCase = librosa.to_mono(_SCREAMING_SNAKE_CASE )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_UpperCAmelCase = librosa.resample(_SCREAMING_SNAKE_CASE , orig_sr=_SCREAMING_SNAKE_CASE , target_sr=self.sampling_rate )
_UpperCAmelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
if pa.types.is_string(storage.type ):
_UpperCAmelCase = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.binary() )
_UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_UpperCAmelCase = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
_UpperCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
_UpperCAmelCase = pa.array([Audio().encode_example(_SCREAMING_SNAKE_CASE ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
_UpperCAmelCase = storage.field('bytes' )
else:
_UpperCAmelCase = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
_UpperCAmelCase = storage.field('path' )
else:
_UpperCAmelCase = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
_UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE , self.pa_type )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
_UpperCAmelCase = f.read()
return bytes_
_UpperCAmelCase = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_UpperCAmelCase = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
_UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE , self.pa_type )
| 354 |
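(Illustrative aside.) The core of the encoding path above is turning a raw float array into in-memory WAV bytes. A standalone sketch of that step, assuming `soundfile` and `numpy` are installed:

import numpy as np

def array_to_wav_bytes(array, sampling_rate):
    # soundfile writes to any file-like object, so the WAV container can be
    # captured in memory instead of on disk.
    from io import BytesIO
    import soundfile as sf
    buffer = BytesIO()
    sf.write(buffer, array, sampling_rate, format="wav")
    return buffer.getvalue()

# One second of silence at 16 kHz.
wav_bytes = array_to_wav_bytes(np.zeros(16_000, dtype=np.float32), 16_000)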
import math
import random
def lowerCAmelCase__ ( a__: float , a__: bool = False ) -> float:
'''simple docstring'''
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCAmelCase__ :Optional[Any] = 0.02
def lowerCAmelCase__ ( a__: int , a__: int ) -> float:
'''simple docstring'''
_UpperCAmelCase = float(2 * (random.randint(1 , 1_0_0 )) - 1 )
for _ in range(a__ ):
# Forward propagation
_UpperCAmelCase = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
_UpperCAmelCase = (expected / 1_0_0) - layer_a
# Error delta
_UpperCAmelCase = layer_1_error * sigmoid_function(a__ , a__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_0_0
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ :List[Any] = int(input('''Expected value: '''))
lowerCAmelCase__ :Any = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 185 | 0 |
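(Illustrative aside.) The `deriv` branch of the sigmoid above relies on the identity sigma'(z) = sigma(z) * (1 - sigma(z)). A quick numerical check of that identity against a central finite difference:

import math

def sigmoid(z):
    return 1 / (1 + math.exp(-z))

z = 0.7
s = sigmoid(z)
analytic = s * (1 - s)
h = 1e-6
numeric = (sigmoid(z + h) - sigmoid(z - h)) / (2 * h)
assert abs(analytic - numeric) < 1e-8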
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: Union[str, Any] , snake_case: Optional[Any] , snake_case: List[str]=7 , snake_case: List[str]=3 , snake_case: Tuple=18 , snake_case: Optional[int]=30 , snake_case: Optional[Any]=400 , snake_case: Tuple=True , snake_case: int=None , snake_case: Optional[int]=True , snake_case: int=None , ) -> Dict:
snake_case_ :Optional[Any] = size if size is not None else {"""shortest_edge""": 20}
snake_case_ :int = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
snake_case_ :List[Any] = parent
snake_case_ :Any = batch_size
snake_case_ :List[Any] = num_channels
snake_case_ :Any = image_size
snake_case_ :Tuple = min_resolution
snake_case_ :Optional[Any] = max_resolution
snake_case_ :Tuple = do_resize
snake_case_ :Any = size
snake_case_ :List[str] = do_center_crop
snake_case_ :Dict = crop_size
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Any = MobileNetVaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self: str ) -> Dict:
snake_case_ :Any = MobileNetVaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self: Dict ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
snake_case_ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , """do_resize""" ) )
self.assertTrue(hasattr(snake_case , """size""" ) )
self.assertTrue(hasattr(snake_case , """do_center_crop""" ) )
self.assertTrue(hasattr(snake_case , """crop_size""" ) )
def lowerCAmelCase_ ( self: Dict ) -> Tuple:
snake_case_ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
snake_case_ :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase_ ( self: Tuple ) -> Tuple:
pass
def lowerCAmelCase_ ( self: Tuple ) -> List[Any]:
# Initialize image_processing
snake_case_ :str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ :int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
snake_case_ :int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ :List[Any] = image_processing(snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self: Any ) -> Tuple:
# Initialize image_processing
snake_case_ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
snake_case_ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ :int = image_processing(snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self: str ) -> List[str]:
# Initialize image_processing
snake_case_ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
snake_case_ :List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ :int = image_processing(snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 66 |
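(Illustrative aside.) `prepare_image_inputs` in the block above is a shared test helper; a rough sketch of the kind of inputs it produces, written as an assumption about its behaviour rather than the library's exact code:

import numpy as np
from PIL import Image

def make_random_pil_images(batch_size=7, num_channels=3, min_res=30, max_res=400):
    # One random-resolution RGB image per batch element.
    images = []
    for _ in range(batch_size):
        side = np.random.randint(min_res, max_res + 1)
        arr = np.random.randint(0, 256, (side, side, num_channels), dtype=np.uint8)
        images.append(Image.fromarray(arr))
    return images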
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__a = pd.read_csv("sample_data.csv", header=None)
__a = df.shape[:1][0]
# If you're using a different dataset, set the target column here
__a = df.iloc[:, 1:2]
__a = actual_data.values.reshape(len_data, 1)
__a = MinMaxScaler().fit_transform(actual_data)
__a = 10
__a = 5
__a = 20
__a = len_data - periods * look_back
__a = actual_data[:division]
__a = actual_data[division - look_back :]
__a , __a = [], []
__a , __a = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__a = np.array(train_x)
__a = np.array(test_x)
__a = np.array([list(i.ravel()) for i in train_y])
__a = np.array([list(i.ravel()) for i in test_y])
__a = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
__a = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
__a = model.predict(x_test)
| 66 | 1 |
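(Illustrative aside.) The two loops above build a sliding-window supervised dataset: each sample is `look_back` past steps and each target the following `forward_days` steps. A self-contained sketch of that construction on a tiny series:

import numpy as np

def make_windows(series, look_back, forward_days):
    xs, ys = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        xs.append(series[i : i + look_back])
        ys.append(series[i + look_back : i + look_back + forward_days])
    return np.array(xs), np.array(ys)

x, y = make_windows(np.arange(20, dtype=np.float32), look_back=10, forward_days=5)
# x.shape == (6, 10), y.shape == (6, 5)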
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : List[str] ,_lowerCamelCase : Dict ,_lowerCamelCase : int ,_lowerCamelCase : Dict ) -> Dict:
_lowerCAmelCase : Optional[Any] = TapasConfig.from_json_file(lowercase_ )
# set absolute/relative position embeddings parameter
_lowerCAmelCase : str = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_lowerCAmelCase : List[str] = TapasForQuestionAnswering(config=lowercase_ )
elif task == "WTQ":
# run_task_main.py hparams
_lowerCAmelCase : Union[str, Any] = 4
_lowerCAmelCase : List[str] = True
# hparam_utils.py hparams
_lowerCAmelCase : str = 0.66_46_94
_lowerCAmelCase : int = 0.20_79_51
_lowerCAmelCase : Dict = 0.12_11_94
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Any = 0.0_35_25_13
_lowerCAmelCase : Any = TapasForQuestionAnswering(config=lowercase_ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_lowerCAmelCase : int = 4
_lowerCAmelCase : str = False
# hparam_utils.py hparams
_lowerCAmelCase : Tuple = 36.45_19
_lowerCAmelCase : int = 0.90_34_21
_lowerCAmelCase : str = 222.088
_lowerCAmelCase : int = True
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Optional[int] = 0.76_31_41
_lowerCAmelCase : Optional[int] = TapasForQuestionAnswering(config=lowercase_ )
elif task == "TABFACT":
_lowerCAmelCase : Union[str, Any] = TapasForSequenceClassification(config=lowercase_ )
elif task == "MLM":
_lowerCAmelCase : Dict = TapasForMaskedLM(config=lowercase_ )
elif task == "INTERMEDIATE_PRETRAINING":
_lowerCAmelCase : Optional[int] = TapasModel(config=lowercase_ )
else:
raise ValueError(f"Task {task} not supported." )
print(f"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowercase_ ,lowercase_ ,lowercase_ )
# Save pytorch-model (weights and configuration)
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(lowercase_ )
# Save tokenizer files
print(f"Save tokenizer files to {pytorch_dump_path}" )
_lowerCAmelCase : List[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=512 )
tokenizer.save_pretrained(lowercase_ )
print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 362 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_a : Union[str, Any] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __A :
def __init__( self , a__ , a__=16 , a__=13 , a__=7 , a__=14 , a__=10 , a__=19 , a__=5 , a__=4 , a__=True , a__=16 , a__=2 , a__=4 , a__=4 , a__="gelu" , a__=0.1 , a__=0.1 , a__=[1, 2, 3, 4, 5] , a__=25 , a__=5 , ):
_lowerCAmelCase : Union[str, Any] = d_model
_lowerCAmelCase : int = parent
_lowerCAmelCase : List[Any] = batch_size
_lowerCAmelCase : Optional[int] = prediction_length
_lowerCAmelCase : int = context_length
_lowerCAmelCase : Optional[Any] = cardinality
_lowerCAmelCase : Tuple = num_time_features
_lowerCAmelCase : str = lags_sequence
_lowerCAmelCase : int = embedding_dimension
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = context_length
_lowerCAmelCase : Optional[int] = prediction_length + label_length
_lowerCAmelCase : Dict = label_length
_lowerCAmelCase : Dict = moving_average
_lowerCAmelCase : Union[str, Any] = autocorrelation_factor
def __A ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __A ( self , a__ ):
_lowerCAmelCase : Dict = config.context_length + max(config.lags_sequence )
_lowerCAmelCase : int = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_lowerCAmelCase : int = floats_tensor([self.batch_size, _past_length] )
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_lowerCAmelCase : Any = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, config.prediction_length] )
_lowerCAmelCase : Dict = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def __A ( self ):
_lowerCAmelCase : Any = self.get_config()
_lowerCAmelCase : str = self.prepare_autoformer_inputs_dict(a__ )
return config, inputs_dict
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[int] = AutoformerModel(config=a__ ).to(a__ ).eval()
_lowerCAmelCase : int = model(**a__ )
_lowerCAmelCase : List[str] = outputs.encoder_last_hidden_state
_lowerCAmelCase : Any = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Optional[int] = model.get_encoder()
encoder.save_pretrained(a__ )
_lowerCAmelCase : Optional[int] = AutoformerEncoder.from_pretrained(a__ ).to(a__ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = model.create_network_inputs(**a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_lowerCAmelCase : Any = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_lowerCAmelCase : Dict = encoder(inputs_embeds=a__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_lowerCAmelCase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_lowerCAmelCase : Optional[Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_lowerCAmelCase : Optional[int] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_lowerCAmelCase : Optional[int] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Optional[int] = model.get_decoder()
decoder.save_pretrained(a__ )
_lowerCAmelCase : Any = AutoformerDecoder.from_pretrained(a__ ).to(a__ )
_lowerCAmelCase : List[Any] = decoder(
trend=a__ , inputs_embeds=a__ , encoder_hidden_states=a__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_UpperCamelCase : Optional[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_UpperCamelCase : int = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : str = False
def __A ( self ):
_lowerCAmelCase : Tuple = AutoformerModelTester(self )
_lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=a__ , has_text_modality=a__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ )
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_class.from_pretrained(a__ , output_loading_info=a__ )
self.assertEqual(info["""missing_keys"""] , [] )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*a__ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : Dict = inspect.signature(getattr(a__ , """forward""" ) )
# The main input is the name of the argument after `self`
_lowerCAmelCase : Dict = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(a__ )
_lowerCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Tuple = [*signature.parameters.keys()]
_lowerCAmelCase : List[str] = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(a__ )] , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Optional[Any] = getattr(self.model_tester , """seq_length""" , a__ )
_lowerCAmelCase : List[str] = getattr(self.model_tester , """decoder_seq_length""" , a__ )
_lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , """encoder_seq_length""" , a__ )
_lowerCAmelCase : int = getattr(self.model_tester , """d_model""" , a__ )
_lowerCAmelCase : Optional[Any] = getattr(self.model_tester , """num_attention_heads""" , a__ )
_lowerCAmelCase : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : List[Any] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCAmelCase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase : int = True
_lowerCAmelCase : Optional[Any] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : int = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCAmelCase : Optional[int] = outputs.encoder_attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_lowerCAmelCase : Dict = len(a__ )
_lowerCAmelCase : List[str] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(a__ , a__ )
# decoder attentions
_lowerCAmelCase : int = outputs.decoder_attentions
self.assertIsInstance(a__ , (list, tuple) )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_lowerCAmelCase : Optional[Any] = outputs.cross_attentions
self.assertIsInstance(a__ , (list, tuple) )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_lowerCAmelCase : Dict = True
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Optional[int] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + 2 , len(a__ ) )
_lowerCAmelCase : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __A ( self ):
super().test_retain_grad_hidden_states_attentions()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any="train-batch.pt" ) -> Optional[int]:
_lowerCAmelCase : List[Any] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" ,filename=_lowerCamelCase ,repo_type="""dataset""" )
_lowerCAmelCase : Dict = torch.load(_lowerCamelCase ,map_location=_lowerCamelCase )
return batch
@require_torch
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(a__ )
_lowerCAmelCase : List[Any] = prepare_batch()
with torch.no_grad():
_lowerCAmelCase : str = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
_lowerCAmelCase : Optional[int] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , a__ )
_lowerCAmelCase : Optional[int] = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=a__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , a__ , atol=a__ ) )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(a__ )
_lowerCAmelCase : Union[str, Any] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_lowerCAmelCase : str = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
_lowerCAmelCase : Optional[int] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , a__ )
_lowerCAmelCase : Any = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=a__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , a__ , atol=a__ ) )
def __A ( self ):
_lowerCAmelCase : List[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(a__ )
_lowerCAmelCase : str = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_lowerCAmelCase : str = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
_lowerCAmelCase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , a__ )
_lowerCAmelCase : Optional[Any] = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=a__ )
_lowerCAmelCase : Optional[int] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , a__ , rtol=1e-1 ) )
| 126 | 0 |
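(Illustrative aside.) The `decomposition_layer` exercised above splits a series into seasonal and trend parts. A minimal sketch of that idea using a moving average over the time axis; this illustrates the concept only, and the model's actual layer may differ in padding and defaults:

import torch

def decompose(x, kernel_size=25):
    # x: (batch, time, features). Trend = moving average over time,
    # seasonal = whatever remains after removing the trend.
    pad = (kernel_size - 1) // 2
    front = x[:, :1, :].repeat(1, pad, 1)  # replicate-pad the start
    back = x[:, -1:, :].repeat(1, kernel_size - 1 - pad, 1)  # and the end
    padded = torch.cat([front, x, back], dim=1)
    trend = torch.nn.functional.avg_pool1d(
        padded.permute(0, 2, 1), kernel_size=kernel_size, stride=1
    ).permute(0, 2, 1)
    return x - trend, trend

seasonal, trend = decompose(torch.randn(2, 96, 1))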
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
A__ = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class __lowerCAmelCase ( lowerCamelCase__ ):
__lowerCamelCase = '''albert'''
def __init__( self , _snake_case=30000 , _snake_case=128 , _snake_case=4096 , _snake_case=12 , _snake_case=1 , _snake_case=64 , _snake_case=16384 , _snake_case=1 , _snake_case="gelu_new" , _snake_case=0 , _snake_case=0 , _snake_case=512 , _snake_case=2 , _snake_case=0.02 , _snake_case=1e-12 , _snake_case=0.1 , _snake_case="absolute" , _snake_case=0 , _snake_case=2 , _snake_case=3 , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = embedding_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_hidden_groups
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = inner_group_num
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = classifier_dropout_prob
_lowerCAmelCase = position_embedding_type
class __lowerCAmelCase ( lowerCamelCase__ ):
@property
def snake_case ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 82 |
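(Illustrative aside.) The config above keeps `embedding_size` (128) far smaller than `hidden_size` (4096), which only works because the embedding is factorized: tokens are embedded into the small space and then projected up. A minimal sketch of that parameterization, assuming a standard PyTorch setup:

import torch.nn as nn

class FactorizedEmbedding(nn.Module):
    # Costs V*E + E*H parameters instead of V*H, a large saving when E << H.
    def __init__(self, vocab_size=30_000, embedding_size=128, hidden_size=4_096):
        super().__init__()
        self.word_embeddings = nn.Embedding(vocab_size, embedding_size)
        self.projection = nn.Linear(embedding_size, hidden_size)

    def forward(self, input_ids):
        return self.projection(self.word_embeddings(input_ids))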
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Node holding a key/value pair, linked into a doubly linked list."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert node just before the rear sentinel (most recently used end)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have a non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(
        self, node: DoubleLinkedListNode[T, U]
    ) -> DoubleLinkedListNode[T, U] | None:
        """Unlink node from the list and return it, or None if it was not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """Least-recently-used cache backed by a dict and a doubly linked list."""

    # class-level map from decorated function to its cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for key and mark it most recently used, else None."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update key, evicting the least recently used entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0;
                # explain to the type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # guaranteed in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of LRUCache, one cache instance per function."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
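A short usage sketch for the decorator above (the memoized Fibonacci is illustrative; the names match the classes in this file):

@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


print(fib(30))           # 832040, computed in linear time thanks to the cache
print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, current size=30)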
| 82 | 1 |
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Backdating the initial timestamp makes the time budget already exhausted.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
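Outside the test suite, the same criteria objects are passed to generate(). A rough sketch, assuming a standard causal LM checkpoint; the model name and limits here are placeholders, and exact kwargs vary across transformers versions:

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxTimeCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Once upon a time", return_tensors="pt")
# Stop sampling after roughly two seconds of wall-clock time
criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)])
output_ids = model.generate(**inputs, stopping_criteria=criteria, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))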
| 367 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """
    Count the reversible numbers of the given length by filling in digit pairs
    from the outside in, tracking the carry via `remainder`.
    """
    if remaining_length == 0:
        # A number with a leading or trailing zero cannot be reversible.
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        # The partner digit must have opposite parity so every digit sum is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """
    Count reversible numbers below 10**max_power (Project Euler 145).

    >>> solution(3)
    120
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 148 | 0 |