Dataset schema (one flattened table; five columns per row):

    column                     type      values
    code                       string    86 to 54.5k characters
    code_codestyle             int64     0 to 371
    style_context              string    87 to 49.2k characters
    style_context_codestyle    int64     0 to 349
    label                      int64     0 or 1
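Each row pairs one Python source file ("code") with a second source file ("style_context"), plus two integer style-class ids and a binary label. The dump does not name the dataset, so the following is a minimal, hypothetical sketch of how rows with this schema could be inspected; a local JSON-lines export ("rows.jsonl", a placeholder) stands in for the real source:

    from datasets import load_dataset

    # Placeholder input: a JSON-lines file with one row per line, matching the schema above.
    ds = load_dataset("json", data_files="rows.jsonl", split="train")

    row = ds[0]
    # The three integer fields; these appear as bare numbers between source blobs in the raw dump below.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    # The two string fields are complete Python files stored as single strings.
    print(row["code"][:200])

The rows follow, with the integer fields labeled in place.

row 1 · code: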
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] )-> List[str]: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 384 UpperCAmelCase__ : Any = 7 if "tiny" in model_name: UpperCAmelCase__ : List[str] = 96 UpperCAmelCase__ : List[Any] = (2, 2, 6, 2) UpperCAmelCase__ : List[str] = (3, 6, 12, 24) elif "small" in model_name: UpperCAmelCase__ : List[Any] = 96 UpperCAmelCase__ : str = (2, 2, 18, 2) UpperCAmelCase__ : int = (3, 6, 12, 24) elif "base" in model_name: UpperCAmelCase__ : List[str] = 128 UpperCAmelCase__ : Any = (2, 2, 18, 2) UpperCAmelCase__ : str = (4, 8, 16, 32) UpperCAmelCase__ : Optional[int] = 12 UpperCAmelCase__ : Dict = 512 elif "large" in model_name: UpperCAmelCase__ : int = 192 UpperCAmelCase__ : str = (2, 2, 18, 2) UpperCAmelCase__ : Union[str, Any] = (6, 12, 24, 48) UpperCAmelCase__ : Dict = 12 UpperCAmelCase__ : List[Any] = 768 # set label information UpperCAmelCase__ : Optional[Any] = 150 UpperCAmelCase__ : Tuple = '''huggingface/label-files''' UpperCAmelCase__ : Dict = '''ade20k-id2label.json''' UpperCAmelCase__ : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase__ : Optional[int] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} UpperCAmelCase__ : Tuple = {v: k for k, v in idalabel.items()} UpperCAmelCase__ : Tuple = SwinConfig( embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , num_heads=UpperCamelCase__ , window_size=UpperCamelCase__ , out_features=["stage1", "stage2", "stage3", "stage4"] , ) UpperCAmelCase__ : Dict = UperNetConfig( backbone_config=UpperCamelCase__ , auxiliary_in_channels=UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , ) return config def SCREAMING_SNAKE_CASE__ ( snake_case : Dict )-> Tuple: '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] # fmt: off # stem rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', 
f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : Any )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = dct.pop(UpperCamelCase__ ) UpperCAmelCase__ : List[Any] = val def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Optional[int] )-> Dict: '''simple docstring''' UpperCAmelCase__ : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): UpperCAmelCase__ : str = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) UpperCAmelCase__ : Union[str, Any] = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' ) UpperCAmelCase__ : Optional[Any] = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase__ : Any = in_proj_weight[:dim, :] UpperCAmelCase__ : Optional[Any] = in_proj_bias[: dim] UpperCAmelCase__ : Optional[Any] = in_proj_weight[ dim : dim * 2, : ] UpperCAmelCase__ : str = in_proj_bias[ dim : dim * 2 ] UpperCAmelCase__ : List[Any] = in_proj_weight[ -dim :, : ] UpperCAmelCase__ : Union[str, Any] = in_proj_bias[-dim :] # fmt: on def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> List[Any]: '''simple docstring''' UpperCAmelCase__ : str = x.shape UpperCAmelCase__ : Union[str, Any] = x.reshape(UpperCamelCase__ , 4 , in_channel // 4 ) UpperCAmelCase__ : List[str] = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(UpperCamelCase__ , UpperCamelCase__ ) return x def 
SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> int: '''simple docstring''' UpperCAmelCase__ : List[str] = x.shape UpperCAmelCase__ : Any = x.reshape(UpperCamelCase__ , in_channel // 4 , 4 ) UpperCAmelCase__ : Tuple = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(UpperCamelCase__ , UpperCamelCase__ ) return x def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] )-> Any: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = x.shape[0] UpperCAmelCase__ : Tuple = x.reshape(4 , in_channel // 4 ) UpperCAmelCase__ : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(UpperCamelCase__ ) return x def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> int: '''simple docstring''' UpperCAmelCase__ : str = x.shape[0] UpperCAmelCase__ : List[Any] = x.reshape(in_channel // 4 , 4 ) UpperCAmelCase__ : Optional[Any] = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(UpperCamelCase__ ) return x def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Union[str, Any] )-> Tuple: '''simple docstring''' UpperCAmelCase__ : Optional[int] = { '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''', '''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''', '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''', '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''', } UpperCAmelCase__ : int = model_name_to_url[model_name] UpperCAmelCase__ : Optional[Any] = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="cpu" , file_name=UpperCamelCase__ )[ '''state_dict''' ] for name, param in state_dict.items(): print(UpperCamelCase__ , param.shape ) UpperCAmelCase__ : Optional[int] = get_upernet_config(UpperCamelCase__ ) UpperCAmelCase__ : List[str] = UperNetForSemanticSegmentation(UpperCamelCase__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): UpperCAmelCase__ : Union[str, Any] = state_dict.pop(UpperCamelCase__ ) if "bn" in key: UpperCAmelCase__ : Optional[Any] = key.replace("bn" , "batch_norm" ) UpperCAmelCase__ : List[Any] = val # rename keys UpperCAmelCase__ : Optional[int] = create_rename_keys(UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_q_k_v(UpperCamelCase__ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: UpperCAmelCase__ : Optional[Any] = reverse_correct_unfold_reduction_order(UpperCamelCase__ ) if "norm" in key: UpperCAmelCase__ : Tuple = reverse_correct_unfold_norm_order(UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) # verify on image UpperCAmelCase__ : int = 
'''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' UpperCAmelCase__ : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("RGB" ) UpperCAmelCase__ : Optional[int] = SegformerImageProcessor() UpperCAmelCase__ : List[Any] = processor(UpperCamelCase__ , return_tensors="pt" ).pixel_values with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(UpperCamelCase__ ) UpperCAmelCase__ : List[Any] = outputs.logits print(logits.shape ) print("First values of logits:" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": UpperCAmelCase__ : Optional[int] = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": UpperCAmelCase__ : str = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": UpperCAmelCase__ : List[Any] = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print("Logits:" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase__ , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCamelCase__ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print(f'Pushing model and processor for {model_name} to hub' ) model.push_to_hub(f'openmmlab/{model_name}' ) processor.push_to_hub(f'openmmlab/{model_name}' ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[F"""upernet-swin-{size}""" for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you\'d like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _lowerCAmelCase : int = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
code_codestyle: 352

row 1 · style_context:
"""simple docstring""" import functools def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int: '''simple docstring''' UpperCAmelCase__ : List[str] = len(snake_case ) UpperCAmelCase__ : str = len(snake_case ) @functools.cache def min_distance(snake_case : int , snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , snake_case ) , 1 + min_distance(snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 298
label: 0

row 2 · code:
"""simple docstring""" import torch from transformers import AutoModel class lowerCAmelCase__ ( torch.nn.Module ): def __init__( self : str , snake_case__ : List[str]="sayef/fsner-bert-base-uncased" ): '''simple docstring''' super(snake_case__ , self ).__init__() UpperCAmelCase__ : int = AutoModel.from_pretrained(snake_case__ , return_dict=snake_case__ ) UpperCAmelCase__ : str = torch.nn.CosineSimilarity(3 , 1e-08 ) UpperCAmelCase__ : Optional[Any] = torch.nn.Softmax(dim=1 ) def __a ( self : Optional[Any] , **snake_case__ : Optional[Any] ): '''simple docstring''' return self.bert(**snake_case__ ).last_hidden_state def __a ( self : Any , snake_case__ : Any ): '''simple docstring''' return token_embeddings.sum(2 , keepdim=snake_case__ ) def __a ( self : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : str=1 ): '''simple docstring''' return self.softmax(T * self.cos(snake_case__ , snake_case__ ) ) def __a ( self : List[Any] , snake_case__ : Any , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = W_supports["sizes"].tolist() UpperCAmelCase__ : str = W_supports["start_token_id"].item() UpperCAmelCase__ : Dict = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] UpperCAmelCase__ : str = self.BERT(**snake_case__ ) UpperCAmelCase__ : Tuple = self.BERT(**snake_case__ ) UpperCAmelCase__ : int = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Tuple = W_supports["input_ids"] == start_token_id UpperCAmelCase__ : List[Any] = W_supports["input_ids"] == end_token_id for i, size in enumerate(snake_case__ ): if i == 0: UpperCAmelCase__ : str = 0 else: UpperCAmelCase__ : Any = support_sizes[i - 1] UpperCAmelCase__ : Dict = S[s : s + size][start_token_masks[s : s + size]] UpperCAmelCase__ : Dict = S[s : s + size][end_token_masks[s : s + size]] UpperCAmelCase__ : Optional[Any] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) UpperCAmelCase__ : List[str] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: UpperCAmelCase__ : List[str] = torch.vstack((p_starts, p_start) ) UpperCAmelCase__ : str = torch.vstack((p_ends, p_end) ) else: UpperCAmelCase__ : List[str] = p_start UpperCAmelCase__ : List[Any] = p_end return p_starts, p_ends
code_codestyle: 353

row 2 · style_context:
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : List[Any] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) UpperCAmelCase__ : Tuple = input_file.read() UpperCAmelCase__ : Tuple = regexp.search(snake_case__ ) return match def __a ( self : List[str] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) UpperCAmelCase__ : Dict = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase__ : int = regexp.finditer(snake_case__ ) UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Path("./datasets" ) UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(snake_case__ ) ): raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = Path("./datasets" ) UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(snake_case__ ) ): raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
style_context_codestyle: 298
label: 0

row 3 · code:
"""simple docstring""" from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = { "repo_name": ["test_repo1", "test_repo2", "test_repo3"], "path": ["test_1.py", "test_2.py", "unit_test.py"], "content": ["a " * 20, "a " * 30, "b " * 7], } UpperCAmelCase__ : int = Dataset.from_dict(_lowerCAmelCase ) return dataset class lowerCAmelCase__ ( lowerCamelCase__ ): def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = get_dataset() UpperCAmelCase__ : Any = make_duplicate_clusters(snake_case__ , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = get_dataset() UpperCAmelCase__ : List[str] = deduplicate_dataset(snake_case__ ) self.assertEqual(len(snake_case__ ) , 2 ) print(snake_case__ ) self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 ) self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , snake_case__ )
code_codestyle: 354

row 3 · style_context:
"""simple docstring""" import numpy as np import datasets _lowerCAmelCase : Optional[int] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ _lowerCAmelCase : Tuple = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ _lowerCAmelCase : Optional[int] = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ): '''simple docstring''' # convert to numpy arrays UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ ) UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T ) try: UpperCAmelCase__ : str = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ ) UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ ) UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
style_context_codestyle: 298
label: 0

row 4 · code:
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''xlm-roberta''' def __init__( self : str , snake_case__ : int=3_0_5_2_2 , snake_case__ : str=7_6_8 , snake_case__ : str=1_2 , snake_case__ : Optional[Any]=1_2 , snake_case__ : List[str]=3_0_7_2 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : str=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : str=5_1_2 , snake_case__ : List[str]=2 , snake_case__ : List[str]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : int=1 , snake_case__ : Dict=0 , snake_case__ : Optional[int]=2 , snake_case__ : Optional[Any]="absolute" , snake_case__ : Optional[Any]=True , snake_case__ : Dict=None , **snake_case__ : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ ) UpperCAmelCase__ : Any = vocab_size UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Optional[Any] = num_hidden_layers UpperCAmelCase__ : str = num_attention_heads UpperCAmelCase__ : List[str] = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_dropout_prob UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Tuple = type_vocab_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Union[str, Any] = layer_norm_eps UpperCAmelCase__ : str = position_embedding_type UpperCAmelCase__ : Tuple = use_cache UpperCAmelCase__ : Dict = classifier_dropout class lowerCAmelCase__ ( __magic_name__ ): @property def __a ( self : int ): '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase__ : str = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase__ : int = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
code_codestyle: 355

row 4 · style_context:
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =IFPipeline SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __a ( self : Dict ): '''simple docstring''' return self._get_dummy_components() def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): UpperCAmelCase__ : str = torch.manual_seed(snake_case__ ) else: UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase__ : Tuple = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self : Tuple ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self : Tuple ): '''simple docstring''' # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __a ( self : Dict ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __a ( self : int ): '''simple docstring''' self._test_save_load_local() def __a ( self : Any ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self : Optional[Any] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : str ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Tuple ): '''simple docstring''' # if UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : List[Any] = None pipe_a.enable_model_cpu_offload() 
pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : List[Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : str = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Tuple = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : str = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : 
Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Optional[Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : int = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Tuple = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' 
torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
style_context_codestyle: 298
label: 0

row 5 · code:
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : Union[str, Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 3 UpperCAmelCase__ : Optional[int] = 2_5_0 UpperCAmelCase__ : Union[str, Any] = ids_tensor((batch_size, length) , lowercase_ ) UpperCAmelCase__ : Dict = torch.ones((batch_size, length) , device=lowercase_ , dtype=torch.float ) / length return input_ids, scores def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self._get_tensors(5 ) UpperCAmelCase__ : Any = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) UpperCAmelCase__ , UpperCAmelCase__ : int = self._get_tensors(1_0 ) self.assertTrue(criteria(lowercase_ , lowercase_ ) ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = MaxLengthCriteria(max_length=1_0 ) UpperCAmelCase__ , UpperCAmelCase__ : str = self._get_tensors(5 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self._get_tensors(9 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self._get_tensors(1_0 ) self.assertTrue(criteria(lowercase_ , lowercase_ ) ) def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_tensors(5 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) UpperCAmelCase__ , UpperCAmelCase__ : int = self._get_tensors(9 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_tensors(1_0 ) self.assertTrue(criteria(lowercase_ , lowercase_ ) ) UpperCAmelCase__ : int = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 1_0 ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self._get_tensors(5 ) UpperCAmelCase__ : List[str] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) UpperCAmelCase__ : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(lowercase_ , lowercase_ ) ) def __a ( self : List[str] ): '''simple docstring''' validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 ) with self.assertWarns(lowercase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 ) UpperCAmelCase__ : Tuple = validate_stopping_criteria(StoppingCriteriaList() , 1_1 ) self.assertEqual(len(lowercase_ ) , 1 )
code_codestyle: 356

row 5 · style_context:
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } _lowerCAmelCase : List[Any] = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } _lowerCAmelCase : int = { """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = set() UpperCAmelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Dict = char UpperCAmelCase__ : Tuple = set(snake_case ) return pairs class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : Dict = vocab_file UpperCAmelCase__ : Tuple = merges_file UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Dict = 3 self.add_from_file(snake_case__ ) UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1] UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges] UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Dict = {} def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] UpperCAmelCase__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def __a ( self : Union[str, Any] , 
snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Tuple = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : List[str] ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : Dict , snake_case__ : Tuple ): '''simple docstring''' if token in self.cache: return self.cache[token] UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) UpperCAmelCase__ : Any = get_pairs(snake_case__ ) if not pairs: return token while True: UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : Tuple = 0 while i < len(snake_case__ ): try: UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : Dict = j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Dict = tuple(snake_case__ ) UpperCAmelCase__ : List[Any] = new_word if len(snake_case__ ) == 1: break else: UpperCAmelCase__ : Dict = get_pairs(snake_case__ ) UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ ) UpperCAmelCase__ : Optional[int] = word[:-4] UpperCAmelCase__ : Union[str, Any] = word return word def __a ( self : List[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : int = re.findall(R"\S+\n?" 
, snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def __a ( self : Dict , snake_case__ : List[str] ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def __a ( self : List[Any] , snake_case__ : Any ): '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def __a ( self : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : Tuple = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : str = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ): copyfile(self.merges_file , snake_case__ ) return out_vocab_file, out_merge_file def __a ( self : List[Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): try: with open(snake_case__ , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' ) return UpperCAmelCase__ : Dict = f.readlines() for lineTmp in lines: UpperCAmelCase__ : Optional[int] = lineTmp.strip() UpperCAmelCase__ : Tuple = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) UpperCAmelCase__ : Any = line[:idx] UpperCAmelCase__ : str = len(self.encoder )
style_context_codestyle: 298
label: 0

row 6 · code:
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Dict = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Tuple = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys _lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 357

row 6 · style_context:
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 # setable values SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None @classmethod def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ ) @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ =42 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return True @register_to_config def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ): '''simple docstring''' UpperCAmelCase__ : Tuple = dtype def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ): '''simple docstring''' if common is None: UpperCAmelCase__ : Any = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype ) UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , ) def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ): '''simple docstring''' return sample def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=snake_case__ , timesteps=snake_case__ , ) def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: UpperCAmelCase__ : Union[str, 
Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) ) elif variance_type == "fixed_large": UpperCAmelCase__ : List[Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": UpperCAmelCase__ : List[str] = variance UpperCAmelCase__ : Optional[Any] = state.common.betas[t] UpperCAmelCase__ : Any = (predicted_variance + 1) / 2 UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if key is None: UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 ) else: UpperCAmelCase__ : int = None # 1. compute alphas, betas UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) UpperCAmelCase__ : List[str] = 1 - alpha_prod_t UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 ) UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ ) def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __len__( self : Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
style_context_codestyle: 298
label: 0

row 7 · code:
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin _lowerCAmelCase : Tuple = ''' Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] ''' class lowerCAmelCase__ ( unittest.TestCase , a_ ): def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Tuple = load_tool("text-question-answering" ) self.tool.setup() UpperCAmelCase__ : Optional[Any] = load_tool("text-question-answering" , remote=lowercase_ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.tool(lowercase_ , "What did Hugging Face do in April 2021?" ) self.assertEqual(lowercase_ , "launched the BigScience Research Workshop" ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.remote_tool(lowercase_ , "What did Hugging Face do in April 2021?" ) self.assertEqual(lowercase_ , "launched the BigScience Research Workshop" ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.tool(text=lowercase_ , question="What did Hugging Face do in April 2021?" ) self.assertEqual(lowercase_ , "launched the BigScience Research Workshop" ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.remote_tool(text=lowercase_ , question="What did Hugging Face do in April 2021?" ) self.assertEqual(lowercase_ , "launched the BigScience Research Workshop" )
code_codestyle: 358
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCAmelCase__ : def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : Union[str, Any] = act_dim UpperCAmelCase__ : Dict = state_dim UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : List[str] = max_length UpperCAmelCase__ : int = is_training def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 ) UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase__ : Optional[int] = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __a ( self : int ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase__ : 
Optional[int] = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =() SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids SCREAMING_SNAKE_CASE_ =False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = DecisionTransformerModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : List[str] ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Tuple = [*signature.parameters.keys()] UpperCAmelCase__ : str = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Optional[int] = model.config torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset() UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ ) UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase__ : Union[str, Any] = state UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Any = torch.zeros(1 , 0 , 
device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 ) for step in range(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model( states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1] UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 ) UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase__ : Tuple = torch.cat( [timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
style_context_codestyle: 298
label: 0
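The DecisionTransformer integration test above performs an autoregressive rollout: each step pads a zero action/reward slot, runs the model over the full history, reads the last predicted action, and appends a stubbed environment transition. A condensed sketch of that loop; the model's call signature and 3-tuple return are assumed to match the test, and the environment step is stubbed with random states:

import torch

def rollout(model, initial_state, target_return, act_dim, steps, device="cpu"):
    # Histories: one seen state, empty action/reward tapes, initial return-to-go.
    states = initial_state.reshape(1, 1, -1).to(device)
    actions = torch.zeros(1, 0, act_dim, device=device)
    rewards = torch.zeros(1, 0, device=device)
    returns_to_go = torch.tensor(float(target_return), device=device).reshape(1, 1, 1)
    timesteps = torch.zeros(1, 1, dtype=torch.long, device=device)

    for step in range(steps):
        # Pad a zero slot for the action/reward the model is about to predict.
        actions = torch.cat([actions, torch.zeros(1, 1, act_dim, device=device)], dim=1)
        rewards = torch.cat([rewards, torch.zeros(1, 1, device=device)], dim=1)
        mask = torch.ones(1, states.shape[1], dtype=torch.long, device=device)

        with torch.no_grad():
            _, action_preds, _ = model(
                states=states, actions=actions, rewards=rewards,
                returns_to_go=returns_to_go, timesteps=timesteps,
                attention_mask=mask, return_dict=False,
            )
        action = action_preds[0, -1]

        # Stubbed transition; a real rollout would call env.step(action) and
        # typically also write `action` back into the padded slot.
        next_state = torch.randn(1, 1, states.shape[-1], device=device)
        reward = 1.0

        states = torch.cat([states, next_state], dim=1)
        returns_to_go = torch.cat(
            [returns_to_go, (returns_to_go[0, -1] - reward).reshape(1, 1, 1)], dim=1)
        timesteps = torch.cat(
            [timesteps, torch.full((1, 1), step + 1, dtype=torch.long, device=device)], dim=1)
    return states, actions, returns_to_go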
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging _lowerCAmelCase : Any = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( )-> Tuple: '''simple docstring''' UpperCAmelCase__ : int = os.getenv("SM_HP_MP_PARAMETERS" , "{}" ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. UpperCAmelCase__ : List[str] = json.loads(_lowercase ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. UpperCAmelCase__ : Dict = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". UpperCAmelCase__ : Any = json.loads(_lowercase ) if not mpi_options.get("sagemaker_mpi_enabled" , _lowercase ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed" ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =field( default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , ) def __a ( self : List[Any] ): '''simple docstring''' super().__post_init__() warnings.warn( "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use " "`TrainingArguments` instead." , __A , ) @cached_property def __a ( self : Tuple ): '''simple docstring''' logger.info("PyTorch: setting up devices" ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( "torch.distributed process group is initialized, but local_rank == -1. " "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" ) if self.no_cuda: UpperCAmelCase__ : Union[str, Any] = torch.device("cpu" ) UpperCAmelCase__ : int = 0 elif is_sagemaker_model_parallel_available(): UpperCAmelCase__ : Optional[Any] = smp.local_rank() UpperCAmelCase__ : Any = torch.device("cuda" , __A ) UpperCAmelCase__ : Union[str, Any] = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta ) UpperCAmelCase__ : Tuple = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) ) UpperCAmelCase__ : Optional[int] = torch.device("cuda" , self.local_rank ) UpperCAmelCase__ : Dict = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 UpperCAmelCase__ : Tuple = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. 
UpperCAmelCase__ : str = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta ) UpperCAmelCase__ : List[str] = torch.device("cuda" , self.local_rank ) UpperCAmelCase__ : int = 1 if device.type == "cuda": torch.cuda.set_device(__A ) return device @property def __a ( self : Optional[int] ): '''simple docstring''' if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def __a ( self : str ): '''simple docstring''' return not is_sagemaker_model_parallel_available() @property def __a ( self : int ): '''simple docstring''' return False
code_codestyle: 359
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 298
label: 0
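The SageMaker arguments record above gates model parallelism on two JSON-valued environment variables plus the presence of the `smdistributed` package. The same detection logic, isolated into a self-contained sketch (env var names as in the source):

import importlib.util
import json
import os

def is_sagemaker_model_parallel_available() -> bool:
    # Model parallelism requires a "partitions" field in SM_HP_MP_PARAMETERS ...
    try:
        smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # ... "sagemaker_mpi_enabled" set in SM_FRAMEWORK_PARAMS ...
    try:
        mpi_options = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # ... and an importable smdistributed package.
    return importlib.util.find_spec("smdistributed") is not None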
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 1_2_8, """min_length""": 1_2, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_4_2, """min_length""": 5_6, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 6_2, """min_length""": 1_1, """num_beams""": 6}, } } UpperCAmelCase__ : Optional[Any] = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 1_2_8, """task_specific_params.summarization.min_length""": 1_2, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 1_4_2, """task_specific_params.summarization_cnn.min_length""": 5_6, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 6_2, """task_specific_params.summarization_xsum.min_length""": 1_1, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , x.transpose() ) ) UpperCAmelCase__ : List[Any] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.random.randn(3 , 4 ) UpperCAmelCase__ : Tuple = torch.tensor(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , transpose(_SCREAMING_SNAKE_CASE ).numpy() ) ) UpperCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : List[str] = torch.tensor(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) ) @require_tf def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.random.randn(3 , 4 ) UpperCAmelCase__ : Dict = tf.constant(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , transpose(_SCREAMING_SNAKE_CASE ).numpy() ) ) UpperCAmelCase__ : List[Any] = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : List[Any] = tf.constant(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) ) @require_flax def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[Any] = np.random.randn(3 , 4 ) UpperCAmelCase__ : Union[str, Any] = jnp.array(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE ) , 
np.asarray(transpose(_SCREAMING_SNAKE_CASE ) ) ) ) UpperCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : int = jnp.array(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , np.asarray(transpose(_SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) ) ) ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , np.reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) ) ) UpperCAmelCase__ : Any = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) , np.reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) ) ) @require_torch def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = np.random.randn(3 , 4 ) UpperCAmelCase__ : str = torch.tensor(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(_SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) ) UpperCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : Tuple = torch.tensor(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) , reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ).numpy() ) ) @require_tf def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = np.random.randn(3 , 4 ) UpperCAmelCase__ : Union[str, Any] = tf.constant(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(_SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) ) UpperCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : List[Any] = tf.constant(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) , reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ).numpy() ) ) @require_flax def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 ) UpperCAmelCase__ : Union[str, Any] = jnp.array(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) , np.asarray(reshape(_SCREAMING_SNAKE_CASE , (4, 3) ) ) ) ) UpperCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : List[str] = jnp.array(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) , np.asarray(reshape(_SCREAMING_SNAKE_CASE , (1_2, 5) ) ) ) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , np.squeeze(_SCREAMING_SNAKE_CASE ) ) ) UpperCAmelCase__ : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , np.squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) ) ) @require_torch def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = np.random.randn(1 , 3 , 4 ) UpperCAmelCase__ : List[str] = torch.tensor(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , squeeze(_SCREAMING_SNAKE_CASE ).numpy() ) ) UpperCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 ) UpperCAmelCase__ : Tuple = torch.tensor(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(_SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) ) @require_tf def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = np.random.randn(1 , 3 , 4 ) UpperCAmelCase__ : Dict = tf.constant(_SCREAMING_SNAKE_CASE ) 
self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , squeeze(_SCREAMING_SNAKE_CASE ).numpy() ) ) UpperCAmelCase__ : Dict = np.random.randn(1 , 4 , 1 , 5 ) UpperCAmelCase__ : List[str] = tf.constant(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(_SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) ) @require_flax def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = np.random.randn(1 , 3 , 4 ) UpperCAmelCase__ : Optional[Any] = jnp.array(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE ) , np.asarray(squeeze(_SCREAMING_SNAKE_CASE ) ) ) ) UpperCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 ) UpperCAmelCase__ : Optional[Any] = jnp.array(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) , np.asarray(squeeze(_SCREAMING_SNAKE_CASE , axis=2 ) ) ) ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , np.expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) ) ) @require_torch def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = np.random.randn(3 , 4 ) UpperCAmelCase__ : str = torch.tensor(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) ) @require_tf def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = np.random.randn(3 , 4 ) UpperCAmelCase__ : List[str] = tf.constant(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) ) @require_flax def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.random.randn(3 , 4 ) UpperCAmelCase__ : Optional[Any] = jnp.array(_SCREAMING_SNAKE_CASE ) self.assertTrue(np.allclose(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) , np.asarray(expand_dims(_SCREAMING_SNAKE_CASE , axis=1 ) ) ) )
code_codestyle: 360
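The generic-tensor test record above exercises wrappers (`transpose`, `reshape`, `squeeze`, `expand_dims`) that dispatch on the input's framework. A minimal sketch of the dispatch pattern, NumPy branch only; the torch/TF/JAX branches are assumed analogous:

import numpy as np

def transpose(array, axes=None):
    """Framework-agnostic transpose (NumPy branch only, as a sketch)."""
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    # A full implementation would also branch on torch.Tensor, tf.Tensor,
    # and jnp.ndarray, calling each framework's native transpose.
    raise TypeError(f"Unsupported input type: {type(array)}")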
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
style_context_codestyle: 298
label: 0
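The hub-utilities record above ends with a variant-handling helper that splices a variant tag (e.g. `fp16`) in front of the file extension of a weights name. Its core string manipulation, extracted into a standalone sketch:

from typing import Optional

def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """E.g. add_variant("diffusion_pytorch_model.bin", "fp16")
    returns "diffusion_pytorch_model.fp16.bin"."""
    if variant is None:
        return weights_name
    splits = weights_name.split(".")
    # Insert the variant between the stem and the final extension.
    splits = splits[:-1] + [variant] + splits[-1:]
    return ".".join(splits)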
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin _lowerCAmelCase : str = """\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n""" class lowerCAmelCase__ ( unittest.TestCase , __magic_name__ ): def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = load_tool("text-question-answering" ) self.tool.setup() UpperCAmelCase__ : Optional[Any] = load_tool("text-question-answering" , remote=lowerCamelCase_ ) def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.tool(lowerCamelCase_ , "What did Hugging Face do in April 2021?" ) self.assertEqual(lowerCamelCase_ , "launched the BigScience Research Workshop" ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.remote_tool(lowerCamelCase_ , "What did Hugging Face do in April 2021?" ) self.assertEqual(lowerCamelCase_ , "launched the BigScience Research Workshop" ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.tool(text=lowerCamelCase_ , question="What did Hugging Face do in April 2021?" ) self.assertEqual(lowerCamelCase_ , "launched the BigScience Research Workshop" ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.remote_tool(text=lowerCamelCase_ , question="What did Hugging Face do in April 2021?" ) self.assertEqual(lowerCamelCase_ , "launched the BigScience Research Workshop" )
code_codestyle: 361
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy() UpperCAmelCase__ : List[Any] = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
style_context_codestyle: 298
label: 0
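The TF/mT5 test in this record scores a target sequence by passing it as `labels` and negating the mean loss. The same scoring pattern as a small standalone sketch (downloads the `google/mt5-small` weights; requires TensorFlow):

import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

def score_target(model_name: str, source: str, target: str) -> float:
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
    input_ids = tokenizer(source, return_tensors="tf").input_ids
    labels = tokenizer(target, return_tensors="tf").input_ids
    # Passing labels makes the model compute the cross-entropy loss.
    loss = model(input_ids, labels=labels).loss
    # Negate the mean to get an average log-likelihood (higher = more likely).
    return float(-tf.math.reduce_mean(loss).numpy())

# Example: score = score_target("google/mt5-small", "Hello there", "Hi I am")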
"""simple docstring""" import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 0 @slow def __a ( self : Any ): '''simple docstring''' for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(snake_case__ ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertIsInstance(snake_case__ , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(snake_case__ ) , 0 ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) # Check that tokenizer_type ≠ model_type UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(snake_case__ , config=snake_case__ ) self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def __a ( self : Tuple ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(snake_case__ , "vocab.txt" ) ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="bert" , use_fast=snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.json" , os.path.join(snake_case__ , "vocab.json" ) ) shutil.copy("./tests/fixtures/merges.txt" , os.path.join(snake_case__ , "merges.txt" ) ) UpperCAmelCase__ : Any 
= AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="gpt2" , use_fast=snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) @require_tokenizers def __a ( self : List[Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(snake_case__ , "vocab.txt" ) ) UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="bert" ) self.assertIsInstance(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.json" , os.path.join(snake_case__ , "vocab.json" ) ) shutil.copy("./tests/fixtures/merges.txt" , os.path.join(snake_case__ , "merges.txt" ) ) UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="gpt2" ) self.assertIsInstance(snake_case__ , snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' with pytest.raises(snake_case__ ): AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" ) @require_tokenizers def __a ( self : Union[str, Any] ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase__ : Any = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" ) self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) ) if isinstance(snake_case__ , snake_case__ ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , snake_case__ ) else: self.assertEqual(tokenizer.do_lower_case , snake_case__ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def __a ( self : Optional[int] ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( snake_case__ , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ): UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Tuple = TOKENIZER_MAPPING.values() UpperCAmelCase__ : Any = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(snake_case__ ) @require_tokenizers def __a ( self : Any ): '''simple docstring''' self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=snake_case__ ) , snake_case__ ) self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , snake_case__ ) @require_tokenizers def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : str = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=snake_case__ ) UpperCAmelCase__ : Tuple = '''Hello, world. 
How are you?''' UpperCAmelCase__ : Optional[int] = tokenizer.tokenize(snake_case__ ) self.assertEqual("[UNK]" , tokens[0] ) UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=snake_case__ ) UpperCAmelCase__ : int = tokenizer.tokenize(snake_case__ ) self.assertEqual("[UNK]" , tokens[0] ) @require_tokenizers def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" ) self.assertEqual(type(snake_case__ ) , snake_case__ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , "[UNK]" ) self.assertEqual(tokenizer.padding_side , "right" ) self.assertEqual(tokenizer.truncation_side , "right" ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("ctrl" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(snake_case__ , snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = get_tokenizer_config("bert-base-cased" ) UpperCAmelCase__ : Dict = config.pop("_commit_hash" , snake_case__ ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(snake_case__ , {"do_lower_case": False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase__ : List[str] = get_tokenizer_config(snake_case__ ) self.assertDictEqual(snake_case__ , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(snake_case__ ) UpperCAmelCase__ : Optional[Any] = get_tokenizer_config(snake_case__ ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" ) def __a ( self : Dict ): '''simple docstring''' try: AutoConfig.register("custom" , snake_case__ ) AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case__ ): AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) UpperCAmelCase__ : Optional[Any] = CustomTokenizer.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def __a ( self : Optional[int] ): '''simple docstring''' try: AutoConfig.register("custom" , snake_case__ ) # Can register in two steps AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( snake_case__ , slow_tokenizer_class=snake_case__ , fast_tokenizer_class=snake_case__ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case__ ): AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Optional[int] = BertTokenizerFast.from_pretrained(snake_case__ ) bert_tokenizer.save_pretrained(snake_case__ ) UpperCAmelCase__ : List[str] = CustomTokenizerFast.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(snake_case__ ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(snake_case__ , use_fast=snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def __a ( self : Dict ): '''simple docstring''' with self.assertRaises(snake_case__ ): UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" ) # If remote code is disabled, we can't load this config. with self.assertRaises(snake_case__ ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ ) UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(snake_case__ ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" ) # Test we can also load the slow version UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(snake_case__ ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ , use_fast=snake_case__ ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" ) @require_tokenizers def __a ( self : Dict ): '''simple docstring''' class lowerCAmelCase__ ( lowerCamelCase__ ): SCREAMING_SNAKE_CASE_ =False class lowerCAmelCase__ ( lowerCamelCase__ ): SCREAMING_SNAKE_CASE_ =NewTokenizer SCREAMING_SNAKE_CASE_ =False try: AutoConfig.register("custom" , snake_case__ ) AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ ) # If remote code is not set, the default is to use local UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=snake_case__ ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertTrue(tokenizer.special_attribute_present ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=snake_case__ ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) # Test we can also load the slow version UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=snake_case__ , use_fast=snake_case__ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) else: self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) def __a ( self : Union[str, Any] ): '''simple docstring''' with self.assertRaisesRegex( snake_case__ , "bert-base is not a local folder and is not a valid model identifier" ): UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("bert-base" ) def __a ( self : List[str] ): '''simple docstring''' with self.assertRaisesRegex( snake_case__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case__ , revision="aaaaaa" ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) with RequestCounter() as counter: UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : int = seq_length UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : Union[str, Any] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Any = embedding_size UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : List[Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : Union[str, Any] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Tuple = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : Union[str, Any] = scope def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Optional[int] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : int = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = AlbertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self 
: Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_labels UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = self.num_labels UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_choices UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Tuple = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =True def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ): '''simple docstring''' UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , 
snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Dict = type self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" ) UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case__ ) UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets _lowerCAmelCase : Tuple = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' _lowerCAmelCase : List[str] = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. ''' _lowerCAmelCase : List[Any] = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... 
case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : List[Any] ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def __a ( self : Dict , snake_case__ : str , snake_case__ : Dict , snake_case__ : str = False , snake_case__ : Any = False , snake_case__ : List[Any] = False , snake_case__ : List[Any] = False , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = len(references[0] ) if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) UpperCAmelCase__ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )] UpperCAmelCase__ : Union[str, Any] = TER( normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , ) UpperCAmelCase__ : Optional[Any] = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = list(range(snake_case ) ) # Find permutation while factorials: UpperCAmelCase__ : str = factorials.pop() UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase : str = { """configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = ["""RemBertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : str = ["""RemBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = [ """REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """RemBertForCausalLM""", """RemBertForMaskedLM""", """RemBertForMultipleChoice""", """RemBertForQuestionAnswering""", """RemBertForSequenceClassification""", """RemBertForTokenClassification""", """RemBertLayer""", """RemBertModel""", """RemBertPreTrainedModel""", """load_tf_weights_in_rembert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : List[str] = [ """TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRemBertForCausalLM""", """TFRemBertForMaskedLM""", """TFRemBertForMultipleChoice""", """TFRemBertForQuestionAnswering""", """TFRemBertForSequenceClassification""", """TFRemBertForTokenClassification""", """TFRemBertLayer""", """TFRemBertModel""", """TFRemBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _lowerCAmelCase : Union[str, Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0} UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Any = image_size UpperCAmelCase__ : int = min_resolution UpperCAmelCase__ : Tuple = max_resolution UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : Optional[int] = do_normalize UpperCAmelCase__ : str = do_convert_rgb UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def __a ( self : str ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = PixaStructImageProcessingTester(self ) @property def __a ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ : Dict = 2_0_4_8 UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : 
Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase__ : Optional[int] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(snake_case__ ): UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches UpperCAmelCase__ : Optional[Any] = "Hello" UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Dict ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Dict = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : List[str] = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Optional[int] ): 
'''simple docstring''' # Initialize image_processor UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase__ : Optional[int] = 3 @property def __a ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : int ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
"""simple docstring""" import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print("""Googling.....""") _lowerCAmelCase : Optional[int] = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:]) _lowerCAmelCase : Optional[Any] = requests.get(url, headers={"""UserAgent""": UserAgent().random}) # res.raise_for_status() with open("""project1a.html""", """wb""") as out_file: # only for knowing the class for data in res.iter_content(10_000): out_file.write(data) _lowerCAmelCase : Tuple = BeautifulSoup(res.text, """html.parser""") _lowerCAmelCase : Union[str, Any] = list(soup.select(""".eZt8xd"""))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get("""href""")) else: webbrowser.open(F"""https://google.com{link.get("href")}""")
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : int = "mock-s3-bucket" UpperCAmelCase__ : Any = f's3://{mock_bucket}' UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case ) assert dataset_path.startswith("s3://" ) is False UpperCAmelCase__ : str = "./local/path" UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is True UpperCAmelCase__ : str = fsspec.filesystem("file" ) UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol] if input_path is None: UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case ) UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." 
)] assert fs.glob("*" ) == [expected_filename] with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} UpperCAmelCase__ : int = compressed_file_paths[protocol] UpperCAmelCase__ : Any = "dataset.jsonl" UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}' UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case ) UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(snake_case ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[Any] , snake_case : Optional[int] )-> str: UpperCAmelCase__ : List[str] = OmegaConf.load(lowerCAmelCase__ ) UpperCAmelCase__ : Optional[int] = torch.load(lowerCAmelCase__ , map_location="cpu" )["""model"""] UpperCAmelCase__ : List[str] = list(state_dict.keys() ) # extract state_dict for VQVAE UpperCAmelCase__ : Dict = {} UpperCAmelCase__ : Dict = """first_stage_model.""" for key in keys: if key.startswith(lowerCAmelCase__ ): UpperCAmelCase__ : List[Any] = state_dict[key] # extract state_dict for UNetLDM UpperCAmelCase__ : List[str] = {} UpperCAmelCase__ : List[str] = """model.diffusion_model.""" for key in keys: if key.startswith(lowerCAmelCase__ ): UpperCAmelCase__ : Optional[int] = state_dict[key] UpperCAmelCase__ : Dict = config.model.params.first_stage_config.params UpperCAmelCase__ : List[Any] = config.model.params.unet_config.params UpperCAmelCase__ : Optional[int] = VQModel(**lowerCAmelCase__ ).eval() vqvae.load_state_dict(lowerCAmelCase__ ) UpperCAmelCase__ : Tuple = UNetLDMModel(**lowerCAmelCase__ ).eval() unet.load_state_dict(lowerCAmelCase__ ) UpperCAmelCase__ : Optional[Any] = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=lowerCAmelCase__ , ) UpperCAmelCase__ : Union[str, Any] = LDMPipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) pipeline.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", type=str, required=True) parser.add_argument("""--config_path""", type=str, required=True) parser.add_argument("""--output_path""", type=str, required=True) _lowerCAmelCase : List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''megatron-bert''' def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Tuple = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Any = max_position_embeddings UpperCAmelCase__ : Dict = type_vocab_size UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : int = layer_norm_eps UpperCAmelCase__ : Optional[Any] = position_embedding_type UpperCAmelCase__ : Any = use_cache
"""simple docstring""" import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCAmelCase__ : def __init__( self : Tuple , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=1_3 , snake_case__ : Tuple=3_0 , snake_case__ : Union[str, Any]=2 , snake_case__ : Dict=3 , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Tuple=3_2 , snake_case__ : Dict=5 , snake_case__ : int=4 , snake_case__ : List[Any]=3_7 , snake_case__ : List[str]="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : str=1_0 , snake_case__ : List[str]=0.02 , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=2 , ): '''simple docstring''' UpperCAmelCase__ : Any = parent UpperCAmelCase__ : Optional[int] = batch_size UpperCAmelCase__ : Union[str, Any] = image_size UpperCAmelCase__ : Tuple = patch_size UpperCAmelCase__ : Optional[int] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : List[str] = use_labels UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : Optional[Any] = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : List[Any] = intermediate_size UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Dict = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = type_sequence_label_size UpperCAmelCase__ : Optional[Any] = initializer_range UpperCAmelCase__ : Dict = scope UpperCAmelCase__ : List[str] = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : Optional[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Tuple = num_patches + 1 def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : List[str] = self.get_config() return config, pixel_values, labels def __a ( self : Union[str, Any] ): '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __a ( self : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Dict ): '''simple 
docstring''' UpperCAmelCase__ : Dict = ViTModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCAmelCase__ : str = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = ViTForMaskedImageModeling(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCAmelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase__ : Tuple = 1 UpperCAmelCase__ : List[Any] = ViTForMaskedImageModeling(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : List[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.type_sequence_label_size UpperCAmelCase__ : str = ViTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCAmelCase__ : Any = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Dict = 1 UpperCAmelCase__ : List[str] = ViTForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() UpperCAmelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[int] = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( UpperCAmelCase__ ) : Optional[Any] = config_and_inputs UpperCAmelCase__ : List[str] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( {'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =True SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ViTModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def __a ( self : int ): '''simple docstring''' pass def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : List[str] = model_class(UpperCamelCase__ ) 
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : str = model_class(UpperCamelCase__ ) UpperCAmelCase__ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase__ : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @slow def __a ( self : List[Any] ): '''simple docstring''' for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : str = ViTModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Optional[Any] ): '''simple docstring''' return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(UpperCamelCase__ ) UpperCAmelCase__ : Optional[int] = self.default_image_processor UpperCAmelCase__ : List[str] = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : str = model(**UpperCamelCase__ ) # verify the logits UpperCAmelCase__ : Dict = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) UpperCAmelCase__ : List[str] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : str = ViTModel.from_pretrained("facebook/dino-vits8" ).to(UpperCamelCase__ ) UpperCAmelCase__ : Dict = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=4_8_0 ) UpperCAmelCase__ : str = prepare_img() UpperCAmelCase__ : Optional[Any] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ) UpperCAmelCase__ : Tuple = inputs.pixel_values.to(UpperCamelCase__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Tuple = model(UpperCamelCase__ , interpolate_pos_encoding=UpperCamelCase__ ) # verify the logits UpperCAmelCase__ : List[Any] = torch.Size((1, 3_6_0_1, 3_8_4) ) self.assertEqual(outputs.last_hidden_state.shape , 
UpperCamelCase__ ) UpperCAmelCase__ : Any = torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" ) UpperCAmelCase__ : List[str] = self.default_image_processor UpperCAmelCase__ : Optional[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="pt" ) UpperCAmelCase__ : Tuple = inputs.pixel_values.to(UpperCamelCase__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): UpperCAmelCase__ : Dict = model(UpperCamelCase__ )
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Dict ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , ) def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )] def __a ( self : Any , snake_case__ : str , snake_case__ : str ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Any ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} ) ] def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )] def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )] class lowerCAmelCase__ ( __magic_name__ ): @require_beam def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : Dict ): '''simple docstring''' import apache_beam as beam UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock: UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , 
"default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Dict = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset
298
0
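The half-precision ViT inference test in the record above reduces to one pattern: cast the model and inputs to the same reduced-precision dtype and run the forward pass under no_grad. A minimal sketch with illustrative names; bfloat16 is used so the snippet also runs on CPU, whereas the test itself uses float16 on a GPU.

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
# Cast model and inputs to the same half-precision dtype, then run the
# forward pass under no_grad; bfloat16 keeps this runnable on CPU.
model = torch.nn.Linear(16, 16).to(device=device, dtype=torch.bfloat16).eval()
inputs = torch.randn(1, 16, device=device, dtype=torch.bfloat16)

with torch.no_grad():
    outputs = model(inputs)

assert outputs.dtype == torch.bfloat16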
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = torch.nn.Linear(1_0 , 1_0 ) UpperCAmelCase__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) UpperCAmelCase__ : Optional[int] = Accelerator() UpperCAmelCase__ : Optional[int] = accelerator.prepare(snake_case__ ) try: pickle.loads(pickle.dumps(snake_case__ ) ) except Exception as e: self.fail(f'Accelerated optimizer pickling failed with {e}' ) AcceleratorState._reset_state()
368
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =XLMTokenizer SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : Optional[int] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(snake_case__ ) ) def __a ( self : Union[str, Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = "lower newer" UpperCAmelCase__ : Optional[Any] = "lower newer" return input_text, output_text def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : List[Any] = "lower" UpperCAmelCase__ : Any = ["low", "er</w>"] UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"] UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
298
0
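The optimizer pickling check above follows a generic round-trip pattern that works for any picklable object. A minimal sketch, assuming a plain SGD optimizer with no Accelerate wrapping involved:

import pickle

import torch

# Round-trip an object through pickle and check that (de)serialization
# does not raise; plain SGD here, no Accelerate involved.
model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

restored = pickle.loads(pickle.dumps(optimizer))
assert type(restored) is type(optimizer)
print("optimizer pickling round-trip OK")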
import unittest

from transformers import DonutProcessor

DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
369
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ): '''simple docstring''' UpperCAmelCase__ : Any = "bilinear" UpperCAmelCase__ : Any = max_size UpperCAmelCase__ : Any = short_edge_length def __call__( self : Dict , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for img in imgs: UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ ) if h < w: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w else: UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size if max(snake_case__ , snake_case__ ) > self.max_size: UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = newh * scale UpperCAmelCase__ : int = neww * scale UpperCAmelCase__ : List[Any] = int(neww + 0.5 ) UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase__ : Any = Image.fromarray(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ ) else: UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase__ : Tuple = nn.functional.interpolate( snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 ) img_augs.append(snake_case__ ) return img_augs class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase__ : Any = cfg.INPUT.FORMAT UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY UpperCAmelCase__ : str = cfg.PAD_VALUE UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std def __a ( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images] UpperCAmelCase__ : int = [ nn.functional.pad( snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case__ , snake_case__ ) ] return torch.stack(snake_case__ ), torch.tensor(snake_case__ ) def __call__( self : str , snake_case__ : int , snake_case__ : int=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [images] if single_image: assert len(snake_case__ ) == 1 for i in range(len(snake_case__ ) ): if 
isinstance(images[i] , torch.Tensor ): images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase__ : Tuple = self.aug(snake_case__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images] # now pad them to do the following operations UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int: '''simple docstring''' assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size tensor[:, 0].clamp_(min=0 , max=snake_case ) tensor[:, 1].clamp_(min=0 , max=snake_case ) tensor[:, 2].clamp_(min=0 , max=snake_case ) tensor[:, 3].clamp_(min=0 , max=snake_case )
298
0
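The ResizeShortestEdge logic in the record above boils down to one scale computation plus a cap on the longer side. A minimal sketch of just that arithmetic, with an illustrative function name:

def shortest_edge_resize_dims(h, w, size, max_size):
    # Scale so the shorter side equals `size`, then cap the longer side
    # at `max_size` (the same arithmetic as in ResizeShortestEdge above).
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        cap = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * cap, neww * cap
    return int(newh + 0.5), int(neww + 0.5)


print(shortest_edge_resize_dims(480, 640, size=384, max_size=1000))  # -> (384, 512)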
"""simple docstring""" import sys from collections import defaultdict class lowerCAmelCase__ : def __init__( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] def __a ( self : List[Any] , snake_case__ : List[Any] ): '''simple docstring''' return self.node_position[vertex] def __a ( self : List[Any] , snake_case__ : List[Any] , snake_case__ : str ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = pos def __a ( self : Dict , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : int ): '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: UpperCAmelCase__ : List[str] = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: UpperCAmelCase__ : Optional[Any] = 2 * start + 1 else: UpperCAmelCase__ : int = 2 * start + 2 if heap[smallest_child] < heap[start]: UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = heap[smallest_child], positions[smallest_child] UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = ( heap[start], positions[start], ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = temp, tempa UpperCAmelCase__ : Dict = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , _lowercase ) self.top_to_bottom(_lowercase , _lowercase , _lowercase , _lowercase ) def __a ( self : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = position[index] while index != 0: UpperCAmelCase__ : Optional[int] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: UpperCAmelCase__ : Any = heap[parent] UpperCAmelCase__ : Tuple = position[parent] self.set_position(position[parent] , _lowercase ) else: UpperCAmelCase__ : List[Any] = val UpperCAmelCase__ : Tuple = temp self.set_position(_lowercase , _lowercase ) break UpperCAmelCase__ : List[Any] = parent else: UpperCAmelCase__ : List[str] = val UpperCAmelCase__ : Tuple = temp self.set_position(_lowercase , 0 ) def __a ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Tuple = len(_lowercase ) // 2 - 1 for i in range(_lowercase , -1 , -1 ): self.top_to_bottom(_lowercase , _lowercase , len(_lowercase ) , _lowercase ) def __a ( self : Tuple , snake_case__ : Any , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Dict = positions[0] UpperCAmelCase__ : int = sys.maxsize self.top_to_bottom(_lowercase , 0 , len(_lowercase ) , _lowercase ) return temp def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : List[str] = Heap() UpperCAmelCase__ : Any = [0] * len(snake_case_ ) UpperCAmelCase__ : Tuple = [-1] * len(snake_case_ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph UpperCAmelCase__ : List[Any] = [] # Heap of Distance of vertices from their neighboring vertex UpperCAmelCase__ : Dict = [] for vertex in range(len(snake_case_ ) ): distance_tv.append(sys.maxsize ) positions.append(snake_case_ ) heap.node_position.append(snake_case_ ) UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : Dict = 1 UpperCAmelCase__ : str = sys.maxsize for neighbor, distance in adjacency_list[0]: UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : List[str] = distance heap.heapify(snake_case_ , snake_case_ ) 
for _ in range(1 , len(snake_case_ ) ): UpperCAmelCase__ : Optional[int] = heap.delete_minimum(snake_case_ , snake_case_ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) UpperCAmelCase__ : Optional[int] = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(snake_case_ )] ): UpperCAmelCase__ : List[Any] = distance heap.bottom_to_top( snake_case_ , heap.get_position(snake_case_ ) , snake_case_ , snake_case_ ) UpperCAmelCase__ : Union[str, Any] = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > _lowerCAmelCase : str = int(input("""Enter number of edges: """).strip()) _lowerCAmelCase : Union[str, Any] = defaultdict(list) for _ in range(edges_number): _lowerCAmelCase : Union[str, Any] = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
370
"""simple docstring""" import qiskit def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts: '''simple docstring''' UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" ) UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = half_adder(1, 1) print(F"""Half Adder Output Qubit Counts: {counts}""")
298
0
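The hand-rolled heap in the record above implements Prim's algorithm; the same minimum spanning tree can be cross-checked against a compact version built on the standard library's heapq. A minimal sketch, assuming tuple-based adjacency lists rather than the [neighbor, distance] lists used above:

import heapq
from collections import defaultdict


def prim_mst(adjacency_list, start=0):
    # Greedily grow the tree from `start`, always taking the cheapest
    # edge leaving the visited set (illustrative helper, not the API above).
    visited = {start}
    frontier = [(weight, start, v) for v, weight in adjacency_list[start]]
    heapq.heapify(frontier)
    tree_edges = []
    while frontier and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(frontier)
        if v in visited:
            continue
        visited.add(v)
        tree_edges.append((u, v))
        for nxt, w in adjacency_list[v]:
            if nxt not in visited:
                heapq.heappush(frontier, (w, v, nxt))
    return tree_edges


graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 3), (1, 2, 1), (1, 3, 4)]:
    graph[u].append((v, w))
    graph[v].append((u, w))
print(prim_mst(graph))  # -> [(0, 1), (1, 2), (1, 3)]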
"""simple docstring""" import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging _lowerCAmelCase : Any = logging.get_logger(__name__) class lowerCAmelCase__ ( _a ): SCREAMING_SNAKE_CASE_ ="""linear""" SCREAMING_SNAKE_CASE_ ="""cosine""" SCREAMING_SNAKE_CASE_ ="""cosine_with_restarts""" SCREAMING_SNAKE_CASE_ ="""polynomial""" SCREAMING_SNAKE_CASE_ ="""constant""" SCREAMING_SNAKE_CASE_ ="""constant_with_warmup""" SCREAMING_SNAKE_CASE_ ="""piecewise_constant""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optimizer , snake_case : int = -1 )-> Optional[Any]: '''simple docstring''' return LambdaLR(a__ , lambda snake_case : 1 , last_epoch=a__ ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optimizer , snake_case : int , snake_case : int = -1 )-> Union[str, Any]: '''simple docstring''' def lr_lambda(snake_case : int ): if current_step < num_warmup_steps: return float(a__ ) / float(max(1.0 , a__ ) ) return 1.0 return LambdaLR(a__ , a__ , last_epoch=a__ ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optimizer , snake_case : str , snake_case : int = -1 )-> Dict: '''simple docstring''' UpperCAmelCase__ : int = {} UpperCAmelCase__ : Union[str, Any] = step_rules.split("," ) for rule_str in rule_list[:-1]: UpperCAmelCase__ , UpperCAmelCase__ : Dict = rule_str.split(":" ) UpperCAmelCase__ : Tuple = int(a__ ) UpperCAmelCase__ : List[str] = float(a__ ) UpperCAmelCase__ : str = value UpperCAmelCase__ : Optional[Any] = float(rule_list[-1] ) def create_rules_function(snake_case : Dict , snake_case : List[Any] ): def rule_func(snake_case : int ) -> float: UpperCAmelCase__ : Tuple = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(a__ ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func UpperCAmelCase__ : Optional[Any] = create_rules_function(a__ , a__ ) return LambdaLR(a__ , a__ , last_epoch=a__ ) def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Dict , snake_case : str , snake_case : Optional[int]=-1 )-> Optional[Any]: '''simple docstring''' def lr_lambda(snake_case : int ): if current_step < num_warmup_steps: return float(a__ ) / float(max(1 , a__ ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(a__ , a__ , a__ ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optimizer , snake_case : int , snake_case : int , snake_case : float = 0.5 , snake_case : int = -1 )-> str: '''simple docstring''' def lr_lambda(snake_case : Union[str, Any] ): if current_step < num_warmup_steps: return float(a__ ) / float(max(1 , a__ ) ) UpperCAmelCase__ : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(a__ ) * 2.0 * progress )) ) return LambdaLR(a__ , a__ , a__ ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optimizer , snake_case : int , snake_case : int , snake_case : int = 1 , snake_case : int = -1 )-> int: '''simple docstring''' def lr_lambda(snake_case : List[Any] ): if current_step < num_warmup_steps: return float(a__ ) / float(max(1 , a__ ) ) UpperCAmelCase__ : Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(a__ ) * progress) % 1.0) )) ) return LambdaLR(a__ , a__ , a__ ) def 
SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : List[str] , snake_case : Optional[int]=1E-7 , snake_case : str=1.0 , snake_case : int=-1 )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Any = optimizer.defaults["lr"] if not (lr_init > lr_end): raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' ) def lr_lambda(snake_case : int ): if current_step < num_warmup_steps: return float(a__ ) / float(max(1 , a__ ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: UpperCAmelCase__ : Optional[int] = lr_init - lr_end UpperCAmelCase__ : Optional[Any] = num_training_steps - num_warmup_steps UpperCAmelCase__ : Any = 1 - (current_step - num_warmup_steps) / decay_steps UpperCAmelCase__ : int = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(a__ , a__ , a__ ) _lowerCAmelCase : Any = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, SchedulerType] , snake_case : Optimizer , snake_case : Optional[str] = None , snake_case : Optional[int] = None , snake_case : Optional[int] = None , snake_case : int = 1 , snake_case : float = 1.0 , snake_case : int = -1 , )-> List[str]: '''simple docstring''' UpperCAmelCase__ : int = SchedulerType(a__ ) UpperCAmelCase__ : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(a__ , last_epoch=a__ ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(a__ , step_rules=a__ , last_epoch=a__ ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(a__ , num_warmup_steps=a__ , last_epoch=a__ ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( a__ , num_warmup_steps=a__ , num_training_steps=a__ , num_cycles=a__ , last_epoch=a__ , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( a__ , num_warmup_steps=a__ , num_training_steps=a__ , power=a__ , last_epoch=a__ , ) return schedule_func( a__ , num_warmup_steps=a__ , num_training_steps=a__ , last_epoch=a__ )
371
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''efficientformer''' def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[str] = hidden_sizes UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Optional[int] = patch_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : Optional[int] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Dict = downsamples UpperCAmelCase__ : Any = dim UpperCAmelCase__ : str = key_dim UpperCAmelCase__ : List[Any] = attention_ratio UpperCAmelCase__ : Optional[Any] = resolution UpperCAmelCase__ : Optional[Any] = pool_size UpperCAmelCase__ : Any = downsample_patch_size UpperCAmelCase__ : int = downsample_stride UpperCAmelCase__ : Dict = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[str] = distillation UpperCAmelCase__ : Dict = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Optional[int] = batch_norm_eps
298
0
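The scheduler factories in the record above all wrap LambdaLR, which multiplies the optimizer's base learning rate by whatever factor the lambda returns for the current step. A minimal sketch of the warmup-then-linear-decay case, with illustrative names:

import torch
from torch.optim.lr_scheduler import LambdaLR


def make_linear_schedule(optimizer, num_warmup_steps, num_training_steps):
    # LambdaLR multiplies the base lr by the factor returned here.
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return current_step / max(1, num_warmup_steps)
        remaining = num_training_steps - current_step
        return max(0.0, remaining / max(1, num_training_steps - num_warmup_steps))

    return LambdaLR(optimizer, lr_lambda)


opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
sched = make_linear_schedule(opt, num_warmup_steps=2, num_training_steps=10)
for _ in range(4):
    opt.step()
    sched.step()
    print(opt.param_groups[0]["lr"])  # 0.05, 0.1, 0.0875, 0.075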
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowerCAmelCase : str = { """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""], """tokenization_ctrl""": ["""CTRLTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[Any] = [ """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """CTRLForSequenceClassification""", """CTRLLMHeadModel""", """CTRLModel""", """CTRLPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = [ """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCTRLForSequenceClassification""", """TFCTRLLMHeadModel""", """TFCTRLModel""", """TFCTRLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys _lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
350
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
298
0
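The evaluation script in the record above normalizes target text by lowercasing, stripping a fixed set of punctuation, and collapsing whitespace before computing WER/CER. A minimal sketch of that normalization, with the character set copied from the script and the token list slightly simplified:

import re

CHARS_TO_IGNORE = r"[,?.!\-\;\:\"“%‘”�—’…–]"  # same set the script strips


def normalize_text(text):
    # Lowercase, drop ignored punctuation, then collapse whitespace runs.
    text = re.sub(CHARS_TO_IGNORE, "", text.lower())
    for token in ["\n\n", "\n", "  "]:
        text = " ".join(text.split(token))
    return text


print(normalize_text("Hello, World!\nThis  is a TEST."))  # hello world this is a test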
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ): def __init__( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = [] def __a ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Any , **snake_case__ : Optional[int] ): '''simple docstring''' self.events.append("on_init_end" ) def __a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : str , **snake_case__ : Dict ): '''simple docstring''' self.events.append("on_train_begin" ) def __a ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Dict , **snake_case__ : Tuple ): '''simple docstring''' self.events.append("on_train_end" ) def __a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : str , **snake_case__ : List[str] ): '''simple docstring''' self.events.append("on_epoch_begin" ) def __a ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Any , **snake_case__ : Tuple ): '''simple docstring''' self.events.append("on_epoch_end" ) def __a ( self : Any , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : str , **snake_case__ : Optional[int] ): '''simple docstring''' self.events.append("on_step_begin" ) def __a ( self : Any , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , **snake_case__ : str ): '''simple docstring''' self.events.append("on_step_end" ) def __a ( self : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple , **snake_case__ : List[str] ): '''simple docstring''' self.events.append("on_evaluate" ) def __a ( self : Optional[Any] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : Dict , **snake_case__ : Optional[Any] ): '''simple docstring''' self.events.append("on_predict" ) def __a ( self : Optional[int] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Any , **snake_case__ : Optional[Any] ): '''simple docstring''' self.events.append("on_save" ) def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : int , **snake_case__ : Optional[Any] ): '''simple docstring''' self.events.append("on_log" ) def __a ( self : Any , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Tuple , **snake_case__ : Union[str, Any] ): '''simple docstring''' self.events.append("on_prediction_step" ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp() def __a ( self : Any ): '''simple docstring''' shutil.rmtree(self.output_dir ) def __a ( self : Tuple , snake_case__ : Tuple=0 , snake_case__ : List[str]=0 , snake_case__ : Dict=6_4 , snake_case__ : Dict=6_4 , snake_case__ : Optional[Any]=None , snake_case__ : Optional[int]=False , **snake_case__ : Optional[Any] ): '''simple docstring''' # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. 
We make sure # its set to False since the tests later on depend on its value. UpperCAmelCase__ : Optional[Any] = RegressionDataset(length=snake_case__ ) UpperCAmelCase__ : Union[str, Any] = RegressionDataset(length=snake_case__ ) UpperCAmelCase__ : int = RegressionModelConfig(a=snake_case__ , b=snake_case__ ) UpperCAmelCase__ : str = RegressionPreTrainedModel(snake_case__ ) UpperCAmelCase__ : str = TrainingArguments(self.output_dir , disable_tqdm=snake_case__ , report_to=[] , **snake_case__ ) return Trainer( snake_case__ , snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , callbacks=snake_case__ , ) def __a ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) # Order doesn't matter UpperCAmelCase__ : Any = sorted(snake_case__ , key=lambda snake_case__ : cb.__name__ if isinstance(snake_case__ , snake_case__ ) else cb.__class__.__name__ ) UpperCAmelCase__ : Tuple = sorted(snake_case__ , key=lambda snake_case__ : cb.__name__ if isinstance(snake_case__ , snake_case__ ) else cb.__class__.__name__ ) for cba, cba in zip(snake_case__ , snake_case__ ): if isinstance(snake_case__ , snake_case__ ) and isinstance(snake_case__ , snake_case__ ): self.assertEqual(snake_case__ , snake_case__ ) elif isinstance(snake_case__ , snake_case__ ) and not isinstance(snake_case__ , snake_case__ ): self.assertEqual(snake_case__ , cba.__class__ ) elif not isinstance(snake_case__ , snake_case__ ) and isinstance(snake_case__ , snake_case__ ): self.assertEqual(cba.__class__ , snake_case__ ) else: self.assertEqual(snake_case__ , snake_case__ ) def __a ( self : Dict , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = ["""on_init_end""", """on_train_begin"""] UpperCAmelCase__ : int = 0 UpperCAmelCase__ : str = len(trainer.get_eval_dataloader() ) UpperCAmelCase__ : str = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs ): expected_events.append("on_epoch_begin" ) for _ in range(snake_case__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("on_save" ) expected_events.append("on_epoch_end" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.get_trainer() UpperCAmelCase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ ) # Callbacks passed at init are added to the default callbacks UpperCAmelCase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(snake_case__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback UpperCAmelCase__ : Any = self.get_trainer(disable_tqdm=snake_case__ ) UpperCAmelCase__ : List[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback] 
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] UpperCAmelCase__ : str = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(snake_case__ ) expected_callbacks.remove(snake_case__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ ) UpperCAmelCase__ : List[str] = self.get_trainer() UpperCAmelCase__ : Any = trainer.pop_callback(snake_case__ ) self.assertEqual(cb.__class__ , snake_case__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ ) trainer.add_callback(snake_case__ ) expected_callbacks.insert(0 , snake_case__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ ) # We can also add, pop, or remove by instance UpperCAmelCase__ : str = self.get_trainer() UpperCAmelCase__ : Tuple = trainer.callback_handler.callbacks[0] trainer.remove_callback(snake_case__ ) expected_callbacks.remove(snake_case__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ ) UpperCAmelCase__ : List[Any] = self.get_trainer() UpperCAmelCase__ : Tuple = trainer.callback_handler.callbacks[0] UpperCAmelCase__ : Tuple = trainer.pop_callback(snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ ) trainer.add_callback(snake_case__ ) expected_callbacks.insert(0 , snake_case__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case__ ) def __a ( self : str ): '''simple docstring''' import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="ignore" , category=snake_case__ ) UpperCAmelCase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() UpperCAmelCase__ : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) ) # Independent log/save/eval UpperCAmelCase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() UpperCAmelCase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) ) UpperCAmelCase__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() UpperCAmelCase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) ) UpperCAmelCase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" ) trainer.train() UpperCAmelCase__ : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) ) UpperCAmelCase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" ) trainer.train() UpperCAmelCase__ : List[Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) ) # A bit of everything UpperCAmelCase__ : str = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="steps" , ) trainer.train() UpperCAmelCase__ : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(snake_case__ , 
self.get_expected_events(snake_case__ ) ) # warning should be emitted for duplicated callbacks with patch("transformers.trainer_callback.logger.warning" ) as warn_mock: UpperCAmelCase__ : Tuple = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(snake_case__ ) in warn_mock.call_args[0][0]
351
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False 
SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), 
BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] 
= torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] 
= model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
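# --- Illustrative sketch (not part of the test file above) ---
# The test relies on `post_process_semantic_segmentation` to turn logits of shape
# (batch, num_labels, h, w) into per-pixel label maps, optionally resized to
# `target_sizes`. A minimal re-implementation of that idea with plain torch,
# assuming nothing beyond standard torch ops:
import torch
import torch.nn.functional as F


def naive_post_process(logits: torch.Tensor, target_size=None) -> torch.Tensor:
    # logits: (1, num_labels, h, w), e.g. (1, 150, 160, 160) as asserted above
    if target_size is not None:
        # resize the class scores before taking the argmax, as the processor does
        logits = F.interpolate(logits, size=target_size, mode="bilinear", align_corners=False)
    return logits.argmax(dim=1)[0]  # (h, w) map of label ids


# e.g. naive_post_process(torch.randn(1, 150, 160, 160), target_size=(500, 300)).shape == (500, 300)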
298
0
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def SCREAMING_SNAKE_CASE__ ( snake_case : dict )-> int: '''simple docstring''' return (data["data"], data["target"]) def SCREAMING_SNAKE_CASE__ ( snake_case : np.ndarray , snake_case : np.ndarray , snake_case : np.ndarray )-> List[Any]: '''simple docstring''' UpperCAmelCase__ : Dict = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(lowerCAmelCase__ , lowerCAmelCase__ ) # Predict target for test data UpperCAmelCase__ : Any = xgb.predict(lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = predictions.reshape(len(lowerCAmelCase__ ) , 1 ) return predictions def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' UpperCAmelCase__ : List[Any] = fetch_california_housing() UpperCAmelCase__ : str = data_handling(lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = train_test_split( lowerCAmelCase__ , lowerCAmelCase__ , test_size=0.25 , random_state=1 ) UpperCAmelCase__ : Union[str, Any] = xgboost(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Error printing print(f'Mean Absolute Error : {mean_absolute_error(lowerCAmelCase__ , lowerCAmelCase__ )}' ) print(f'Mean Square Error : {mean_squared_error(lowerCAmelCase__ , lowerCAmelCase__ )}' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
352
"""simple docstring""" import functools def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int: '''simple docstring''' UpperCAmelCase__ : List[str] = len(snake_case ) UpperCAmelCase__ : str = len(snake_case ) @functools.cache def min_distance(snake_case : int , snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , snake_case ) , 1 + min_distance(snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
298
0
"""simple docstring""" import math import os import sys def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = "" try: with open(snake_case , "rb" ) as binary_file: UpperCAmelCase__ : Any = binary_file.read() for dat in data: UpperCAmelCase__ : Any = f'{dat:08b}' result += curr_byte return result except OSError: print("File not accessible" ) sys.exit() def SCREAMING_SNAKE_CASE__ ( snake_case : dict[str, str] , snake_case : str , snake_case : int , snake_case : str )-> Any: '''simple docstring''' lexicon.pop(snake_case ) UpperCAmelCase__ : Tuple = last_match_id if math.loga(snake_case ).is_integer(): for curr_key in lexicon: UpperCAmelCase__ : str = "0" + lexicon[curr_key] UpperCAmelCase__ : Optional[Any] = bin(snake_case )[2:] def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> Any: '''simple docstring''' UpperCAmelCase__ : Tuple = {"0": "0", "1": "1"} UpperCAmelCase__ , UpperCAmelCase__ : Tuple = "", "" UpperCAmelCase__ : Union[str, Any] = len(snake_case ) for i in range(len(snake_case ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue UpperCAmelCase__ : Dict = lexicon[curr_string] result += last_match_id add_key_to_lexicon(snake_case , snake_case , snake_case , snake_case ) index += 1 UpperCAmelCase__ : Tuple = "" while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": UpperCAmelCase__ : List[str] = lexicon[curr_string] result += last_match_id return result def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = os.path.getsize(snake_case ) UpperCAmelCase__ : Any = bin(snake_case )[2:] UpperCAmelCase__ : Optional[int] = len(snake_case ) return "0" * (length_length - 1) + file_length_binary + compressed def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : str = 8 try: with open(snake_case , "wb" ) as opened_file: UpperCAmelCase__ : Optional[Any] = [ to_write[i : i + byte_length] for i in range(0 , len(snake_case ) , snake_case ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("10000000" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array: opened_file.write(int(snake_case , 2 ).to_bytes(1 , byteorder="big" ) ) except OSError: print("File not accessible" ) sys.exit() def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> Dict: '''simple docstring''' UpperCAmelCase__ : List[str] = read_file_binary(snake_case ) UpperCAmelCase__ : str = compress_data(snake_case ) UpperCAmelCase__ : Any = add_file_length(snake_case , snake_case ) write_file_binary(snake_case , snake_case ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
353
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : List[Any] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) UpperCAmelCase__ : Tuple = input_file.read() UpperCAmelCase__ : Tuple = regexp.search(snake_case__ ) return match def __a ( self : List[str] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) UpperCAmelCase__ : Dict = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase__ : int = regexp.finditer(snake_case__ ) UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Path("./datasets" ) UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(snake_case__ ) ): raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = Path("./datasets" ) UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(snake_case__ ) ): raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
298
0
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel _lowerCAmelCase : int = { "text_branch": "text_model", "audio_branch": "audio_model.audio_encoder", "attn": "attention.self", "self.proj": "output.dense", "attention.self_mask": "attn_mask", "mlp.fc1": "intermediate.dense", "mlp.fc2": "output.dense", "norm1": "layernorm_before", "norm2": "layernorm_after", "bn0": "batch_norm", } _lowerCAmelCase : int = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""") def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Tuple=False )-> Dict: '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = create_model( "HTSAT-tiny" , "roberta" , snake_case , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=snake_case , fusion_type="aff_2d" if enable_fusion else None , ) return model, model_cfg def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> Tuple: '''simple docstring''' UpperCAmelCase__ : Optional[int] = {} UpperCAmelCase__ : Union[str, Any] = r".*sequential.(\d+).*" UpperCAmelCase__ : str = r".*_projection.(\d+).*" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: UpperCAmelCase__ : List[Any] = key.replace(snake_case , snake_case ) if re.match(snake_case , snake_case ): # replace sequential layers with list UpperCAmelCase__ : Tuple = re.match(snake_case , snake_case ).group(1 ) UpperCAmelCase__ : List[Any] = key.replace(f'sequential.{sequential_layer}.' , f'layers.{int(snake_case )//3}.linear.' ) elif re.match(snake_case , snake_case ): UpperCAmelCase__ : Any = int(re.match(snake_case , snake_case ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... UpperCAmelCase__ : str = 1 if projecton_layer == 0 else 2 UpperCAmelCase__ : List[str] = key.replace(f'_projection.{projecton_layer}.' , f'_projection.linear{transformers_projection_layer}.' 
) if "audio" and "qkv" in key: # split qkv into query key and value UpperCAmelCase__ : List[Any] = value UpperCAmelCase__ : Union[str, Any] = mixed_qkv.size(0 ) // 3 UpperCAmelCase__ : Optional[int] = mixed_qkv[:qkv_dim] UpperCAmelCase__ : Any = mixed_qkv[qkv_dim : qkv_dim * 2] UpperCAmelCase__ : str = mixed_qkv[qkv_dim * 2 :] UpperCAmelCase__ : Dict = query_layer UpperCAmelCase__ : Optional[Any] = key_layer UpperCAmelCase__ : str = value_layer else: UpperCAmelCase__ : Union[str, Any] = value return model_state_dict def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : List[str] , snake_case : Tuple , snake_case : Any=False )-> str: '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = init_clap(snake_case , enable_fusion=snake_case ) clap_model.eval() UpperCAmelCase__ : List[Any] = clap_model.state_dict() UpperCAmelCase__ : List[str] = rename_state_dict(snake_case ) UpperCAmelCase__ : List[str] = ClapConfig() UpperCAmelCase__ : int = enable_fusion UpperCAmelCase__ : Dict = ClapModel(snake_case ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case , strict=snake_case ) model.save_pretrained(snake_case ) transformers_config.save_pretrained(snake_case ) if __name__ == "__main__": _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""") _lowerCAmelCase : int = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
354
"""simple docstring""" import numpy as np import datasets _lowerCAmelCase : Optional[int] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ _lowerCAmelCase : Tuple = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ _lowerCAmelCase : Optional[int] = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ): '''simple docstring''' # convert to numpy arrays UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ ) UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T ) try: UpperCAmelCase__ : str = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ ) UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ ) UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
298
0
"""simple docstring""" import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowerCAmelCase__ ( UpperCamelCase__ ): def __init__( self : int , *snake_case__ : Dict , snake_case__ : Tuple=None , snake_case__ : List[str]=None , **snake_case__ : Tuple ): '''simple docstring''' super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) UpperCAmelCase__ : List[str] = eval_examples UpperCAmelCase__ : Any = post_process_function def __a ( self : str , snake_case__ : Optional[Dataset] = None , snake_case__ : str=None , snake_case__ : Optional[List[str]] = None , snake_case__ : str = "eval" , **snake_case__ : Dict , ): '''simple docstring''' UpperCAmelCase__ : str = gen_kwargs.copy() UpperCAmelCase__ : str = ( gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length ) UpperCAmelCase__ : List[str] = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams ) UpperCAmelCase__ : Dict = gen_kwargs UpperCAmelCase__ : int = self.eval_dataset if eval_dataset is None else eval_dataset UpperCAmelCase__ : Tuple = self.get_eval_dataloader(UpperCamelCase_ ) UpperCAmelCase__ : str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase__ : Optional[int] = self.compute_metrics UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = time.time() UpperCAmelCase__ : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase__ : Optional[Any] = eval_loop( UpperCamelCase_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , ) finally: UpperCAmelCase__ : int = compute_metrics UpperCAmelCase__ : Any = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default UpperCAmelCase__ : Union[str, Any] = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase__ : Any = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): UpperCAmelCase__ : Tuple = metrics.pop(UpperCamelCase_ ) metrics.update(output.metrics ) else: UpperCAmelCase__ : List[Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCamelCase_ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) UpperCAmelCase__ : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ ) return metrics def __a ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any=None , snake_case__ : str = "test" , **snake_case__ : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = gen_kwargs.copy() UpperCAmelCase__ : List[str] = self.get_test_dataloader(UpperCamelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase__ : List[str] = self.compute_metrics UpperCAmelCase__ : str = None UpperCAmelCase__ : Tuple = time.time() UpperCAmelCase__ : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase__ : str = eval_loop( UpperCamelCase_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , ) finally: UpperCAmelCase__ : Tuple = compute_metrics UpperCAmelCase__ : Tuple = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output UpperCAmelCase__ : Dict = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , "predict" ) UpperCAmelCase__ : Optional[int] = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): UpperCAmelCase__ : List[Any] = metrics.pop(UpperCamelCase_ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
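# --- Illustrative sketch (not part of the trainer above) ---
# Roughly what the `speed_metrics` helper contributes to `output.metrics`:
# wall-clock runtime plus throughput, with keys prefixed by the split name.
# This is an approximation for illustration, not the exact transformers code.
import time


def rough_speed_metrics(split: str, start_time: float, num_samples: int, num_steps: int) -> dict:
    runtime = time.time() - start_time
    return {
        f"{split}_runtime": round(runtime, 4),
        f"{split}_samples_per_second": round(num_samples / runtime, 3),
        f"{split}_steps_per_second": round(num_steps / runtime, 3),
    }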
355
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =IFPipeline SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __a ( self : Dict ): '''simple docstring''' return self._get_dummy_components() def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): UpperCAmelCase__ : str = torch.manual_seed(snake_case__ ) else: UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase__ : Tuple = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self : Tuple ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self : Tuple ): '''simple docstring''' # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __a ( self : Dict ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __a ( self : int ): '''simple docstring''' self._test_save_load_local() def __a ( self : Any ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self : Optional[Any] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : str ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Tuple ): '''simple docstring''' # if UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : List[Any] = None pipe_a.enable_model_cpu_offload() 
pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : List[Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : str = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Tuple = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : str = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : 
Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Optional[Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : int = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Tuple = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' 
torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
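# --- Illustrative sketch (not part of the test file above) ---
# The peak-VRAM pattern the assertions above rely on: reset the CUDA peak
# counters before each pipeline call, then read the high-water mark afterwards.
import torch


def run_with_memory_budget(fn, budget_bytes: int):
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    result = fn()
    mem_bytes = torch.cuda.max_memory_allocated()
    assert mem_bytes < budget_bytes, f"peak memory {mem_bytes} exceeded {budget_bytes}"
    return result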
298
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : str = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { 'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json', } class lowerCAmelCase__ ( lowerCamelCase__ ): SCREAMING_SNAKE_CASE_ ='''transfo-xl''' SCREAMING_SNAKE_CASE_ =['''mems'''] SCREAMING_SNAKE_CASE_ ={ '''n_token''': '''vocab_size''', '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Dict , snake_case__ : Any=2_6_7_7_3_5 , snake_case__ : Optional[int]=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0] , snake_case__ : List[str]=1_0_2_4 , snake_case__ : Union[str, Any]=1_0_2_4 , snake_case__ : List[str]=1_6 , snake_case__ : str=6_4 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : Optional[int]=4 , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=1_8 , snake_case__ : List[str]=1_6_0_0 , snake_case__ : List[str]=1_0_0_0 , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Optional[int]=0 , snake_case__ : List[str]=-1 , snake_case__ : int=True , snake_case__ : str=0.1 , snake_case__ : Any=0.0 , snake_case__ : List[str]=True , snake_case__ : Optional[Any]="normal" , snake_case__ : List[Any]=0.01 , snake_case__ : List[str]=0.01 , snake_case__ : str=0.02 , snake_case__ : Dict=1e-5 , snake_case__ : Optional[int]=0 , **snake_case__ : str , ): '''simple docstring''' UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : Tuple = [] self.cutoffs.extend(__lowerCamelCase ) if proj_share_all_but_first: UpperCAmelCase__ : Optional[Any] = [False] + [True] * len(self.cutoffs ) else: UpperCAmelCase__ : str = [False] + [False] * len(self.cutoffs ) UpperCAmelCase__ : str = d_model UpperCAmelCase__ : List[str] = d_embed UpperCAmelCase__ : Optional[Any] = d_head UpperCAmelCase__ : str = d_inner UpperCAmelCase__ : List[Any] = div_val UpperCAmelCase__ : Optional[int] = pre_lnorm UpperCAmelCase__ : Any = n_layer UpperCAmelCase__ : int = n_head UpperCAmelCase__ : Optional[int] = mem_len UpperCAmelCase__ : Tuple = same_length UpperCAmelCase__ : Optional[Any] = attn_type UpperCAmelCase__ : Tuple = clamp_len UpperCAmelCase__ : List[str] = sample_softmax UpperCAmelCase__ : Dict = adaptive UpperCAmelCase__ : str = dropout UpperCAmelCase__ : List[str] = dropatt UpperCAmelCase__ : List[str] = untie_r UpperCAmelCase__ : str = init UpperCAmelCase__ : Union[str, Any] = init_range UpperCAmelCase__ : Optional[int] = proj_init_std UpperCAmelCase__ : Optional[Any] = init_std UpperCAmelCase__ : Dict = layer_norm_epsilon super().__init__(eos_token_id=__lowerCamelCase , **__lowerCamelCase ) @property def __a ( self : Tuple ): '''simple docstring''' logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' ) return -1 @max_position_embeddings.setter def __a ( self : int , snake_case__ : Dict ): '''simple docstring''' raise NotImplementedError( f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
356
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } _lowerCAmelCase : List[Any] = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } _lowerCAmelCase : int = { """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = set() UpperCAmelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Dict = char UpperCAmelCase__ : Tuple = set(snake_case ) return pairs class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : Dict = vocab_file UpperCAmelCase__ : Tuple = merges_file UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Dict = 3 self.add_from_file(snake_case__ ) UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1] UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges] UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Dict = {} def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] UpperCAmelCase__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def __a ( self : Union[str, Any] , 
snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Tuple = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : List[str] ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : Dict , snake_case__ : Tuple ): '''simple docstring''' if token in self.cache: return self.cache[token] UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) UpperCAmelCase__ : Any = get_pairs(snake_case__ ) if not pairs: return token while True: UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : Tuple = 0 while i < len(snake_case__ ): try: UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : Dict = j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Dict = tuple(snake_case__ ) UpperCAmelCase__ : List[Any] = new_word if len(snake_case__ ) == 1: break else: UpperCAmelCase__ : Dict = get_pairs(snake_case__ ) UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ ) UpperCAmelCase__ : Optional[int] = word[:-4] UpperCAmelCase__ : Union[str, Any] = word return word def __a ( self : List[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : int = re.findall(R"\S+\n?" 
, snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def __a ( self : Dict , snake_case__ : List[str] ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def __a ( self : List[Any] , snake_case__ : Any ): '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def __a ( self : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : Tuple = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : str = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ): copyfile(self.merges_file , snake_case__ ) return out_vocab_file, out_merge_file def __a ( self : List[Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): try: with open(snake_case__ , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' ) return UpperCAmelCase__ : Dict = f.readlines() for lineTmp in lines: UpperCAmelCase__ : Optional[int] = lineTmp.strip() UpperCAmelCase__ : Tuple = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) UpperCAmelCase__ : Any = line[:idx] UpperCAmelCase__ : str = len(self.encoder )
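# --- Illustrative sketch (not part of the tokenizer above) ---
# What `get_pairs` feeds into the BPE loop: the set of adjacent symbol pairs,
# with "</w>" marking the end of the word (appended in `bpe` before merging).
word = ("l", "o", "w", "e", "r</w>")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
assert pairs == {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")}
# The pair with the lowest rank in `bpe_ranks` is merged first, and the loop
# repeats until no known pair remains.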
298
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : str = { "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json", # See all CANINE models at https://huggingface.co/models?filter=canine } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ="""canine""" def __init__( self : Dict , snake_case__ : Dict=7_6_8 , snake_case__ : Dict=1_2 , snake_case__ : Union[str, Any]=1_2 , snake_case__ : List[str]=3_0_7_2 , snake_case__ : List[str]="gelu" , snake_case__ : Dict=0.1 , snake_case__ : int=0.1 , snake_case__ : List[str]=1_6_3_8_4 , snake_case__ : Any=1_6 , snake_case__ : Optional[int]=0.02 , snake_case__ : str=1e-12 , snake_case__ : List[str]=0 , snake_case__ : str=0xe_000 , snake_case__ : str=0xe_001 , snake_case__ : Optional[int]=4 , snake_case__ : List[Any]=4 , snake_case__ : Optional[int]=8 , snake_case__ : str=1_6_3_8_4 , snake_case__ : int=1_2_8 , **snake_case__ : Dict , ): '''simple docstring''' super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A ) UpperCAmelCase__ : Union[str, Any] = max_position_embeddings UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : str = num_attention_heads UpperCAmelCase__ : Optional[int] = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Optional[Any] = initializer_range UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : int = layer_norm_eps # Character config: UpperCAmelCase__ : Union[str, Any] = downsampling_rate UpperCAmelCase__ : Union[str, Any] = upsampling_kernel_size UpperCAmelCase__ : Union[str, Any] = num_hash_functions UpperCAmelCase__ : int = num_hash_buckets UpperCAmelCase__ : Tuple = local_transformer_stride
357
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 # setable values SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None @classmethod def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ ) @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ =42 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return True @register_to_config def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ): '''simple docstring''' UpperCAmelCase__ : Tuple = dtype def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ): '''simple docstring''' if common is None: UpperCAmelCase__ : Any = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype ) UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , ) def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ): '''simple docstring''' return sample def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=snake_case__ , timesteps=snake_case__ , ) def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: UpperCAmelCase__ : Union[str, 
Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) ) elif variance_type == "fixed_large": UpperCAmelCase__ : List[Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": UpperCAmelCase__ : List[str] = variance UpperCAmelCase__ : Optional[Any] = state.common.betas[t] UpperCAmelCase__ : Any = (predicted_variance + 1) / 2 UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if key is None: UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 ) else: UpperCAmelCase__ : int = None # 1. compute alphas, betas UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) UpperCAmelCase__ : List[str] = 1 - alpha_prod_t UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 ) UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ ) def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __len__( self : Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
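# --- Illustrative sketch (not part of the scheduler above) ---
# The epsilon-prediction branch of `step` in plain Python: recover x0 from the
# model's noise estimate, then form the DDPM posterior mean (formulas (7) and
# (15) of https://arxiv.org/pdf/2006.11239.pdf, mirroring the code above).
def ddpm_posterior_mean(sample, model_output, alpha_prod_t, alpha_prod_t_prev, beta_t, alpha_t):
    # "predicted x_0" from the predicted noise
    pred_original_sample = (sample - (1 - alpha_prod_t) ** 0.5 * model_output) / alpha_prod_t**0.5
    # coefficients for pred_original_sample and the current sample x_t
    pred_coeff = (alpha_prod_t_prev**0.5 * beta_t) / (1 - alpha_prod_t)
    current_coeff = alpha_t**0.5 * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)
    return pred_coeff * pred_original_sample + current_coeff * sample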
298
0
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase__ ( lowerCamelCase__ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): @property def __a ( self : int ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Tuple = ort.SessionOptions() UpperCAmelCase__ : int = False return options def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) UpperCAmelCase__ : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) UpperCAmelCase__ : List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__snake_case ) UpperCAmelCase__ : Any = 'A red cat sitting on a park bench' UpperCAmelCase__ : int = np.random.RandomState(0 ) UpperCAmelCase__ : Dict = pipe( prompt=__snake_case , image=__snake_case , mask_image=__snake_case , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__snake_case , output_type="np" , ) UpperCAmelCase__ : Tuple = output.images UpperCAmelCase__ : List[Any] = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ : Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) UpperCAmelCase__ : Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) UpperCAmelCase__ : Optional[int] = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" ) UpperCAmelCase__ : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__snake_case ) UpperCAmelCase__ : str = 'A red cat sitting on a park bench' UpperCAmelCase__ : List[str] = np.random.RandomState(0 ) UpperCAmelCase__ : Optional[Any] = pipe( prompt=__snake_case , image=__snake_case , mask_image=__snake_case , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__snake_case , output_type="np" , ) UpperCAmelCase__ : Any = output.images UpperCAmelCase__ : List[Any] = images[0, 
2_5_5:2_5_8, 2_5_5:2_5_8, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ : Optional[Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
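For context on the `gpu_provider` and session-options properties in the test above: onnxruntime accepts `(name, options_dict)` tuples in the providers list, which is how the 15GB arena cap reaches the CUDA execution provider. A minimal sketch follows; the model path is a placeholder, and the boolean the obfuscated property sets is assumed to be `enable_mem_pattern`.

import onnxruntime as ort

provider = (
    "CUDAExecutionProvider",
    {
        "gpu_mem_limit": "15000000000",  # 15GB arena cap, as in the test
        "arena_extend_strategy": "kSameAsRequested",
    },
)

options = ort.SessionOptions()
options.enable_mem_pattern = False  # assumed to be what the test's options property sets

# session = ort.InferenceSession("model.onnx", sess_options=options, providers=[provider])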
358
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCAmelCase__ : def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : Union[str, Any] = act_dim UpperCAmelCase__ : Dict = state_dim UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : List[str] = max_length UpperCAmelCase__ : int = is_training def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 ) UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase__ : Optional[int] = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __a ( self : int ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase__ : 
Optional[int] = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =() SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids SCREAMING_SNAKE_CASE_ =False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = DecisionTransformerModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : List[str] ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Tuple = [*signature.parameters.keys()] UpperCAmelCase__ : str = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Optional[int] = model.config torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset() UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ ) UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase__ : Union[str, Any] = state UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Any = torch.zeros(1 , 0 , 
device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 ) for step in range(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model( states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1] UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 ) UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase__ : Tuple = torch.cat( [timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
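The `(batch_size, seq_length * 3, hidden_size)` hidden-state shape asserted in `create_and_check_model` above comes from interleaving three token streams per timestep: return-to-go, state, and action. A toy, shape-only sketch of that layout (not the model's actual embedding code):

import torch

batch_size, seq_length, hidden_size = 2, 5, 8
returns_emb = torch.randn(batch_size, seq_length, hidden_size)
state_emb = torch.randn(batch_size, seq_length, hidden_size)
action_emb = torch.randn(batch_size, seq_length, hidden_size)

# Interleave as (R_1, s_1, a_1, R_2, s_2, a_2, ...):
stacked = torch.stack((returns_emb, state_emb, action_emb), dim=1)  # (B, 3, T, H)
tokens = stacked.permute(0, 2, 1, 3).reshape(batch_size, 3 * seq_length, hidden_size)
assert tokens.shape == (batch_size, seq_length * 3, hidden_size)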
298
0
"""simple docstring""" import unittest from knapsack import greedy_knapsack as kp class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0] UpperCAmelCase__ : Dict = [2, 4, 6, 8, 1_0, 1_2] UpperCAmelCase__ : Optional[int] = 1_0_0 self.assertEqual(kp.calc_profit(A__ , A__ , A__ ) , 2_1_0 ) def __a ( self : List[str] ): '''simple docstring''' self.assertRaisesRegex(A__ , "max_weight must greater than zero." ) def __a ( self : Dict ): '''simple docstring''' self.assertRaisesRegex(A__ , "Weight can not be negative." ) def __a ( self : Optional[int] ): '''simple docstring''' self.assertRaisesRegex(A__ , "Profit can not be negative." ) def __a ( self : str ): '''simple docstring''' self.assertRaisesRegex(A__ , "max_weight must greater than zero." ) def __a ( self : Any ): '''simple docstring''' self.assertRaisesRegex( A__ , "The length of profit and weight must be same." ) if __name__ == "__main__": unittest.main()
359
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
298
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCAmelCase__ ( __a , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =KandinskyInpaintPipeline SCREAMING_SNAKE_CASE_ =["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] SCREAMING_SNAKE_CASE_ =[ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] SCREAMING_SNAKE_CASE_ =[ """generator""", """height""", """width""", """latents""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] SCREAMING_SNAKE_CASE_ =False @property def __a ( self : List[Any] ): '''simple docstring''' return 3_2 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return 3_2 @property def __a ( self : int ): '''simple docstring''' return self.time_input_dim @property def __a ( self : Tuple ): '''simple docstring''' return self.time_input_dim * 4 @property def __a ( self : List[Any] ): '''simple docstring''' return 1_0_0 @property def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : str = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __a ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : List[Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , ) UpperCAmelCase__ : Optional[int] = MultilingualCLIP(a__ ) UpperCAmelCase__ : int = text_encoder.eval() return text_encoder @property def __a ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : str = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCAmelCase__ : Tuple = UNetaDConditionModel(**a__ ) return model @property def __a ( self : str ): '''simple docstring''' return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, 
"up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __a ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Union[str, Any] = VQModel(**self.dummy_movq_kwargs ) return model def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = self.dummy_text_encoder UpperCAmelCase__ : Union[str, Any] = self.dummy_tokenizer UpperCAmelCase__ : str = self.dummy_unet UpperCAmelCase__ : Dict = self.dummy_movq UpperCAmelCase__ : Tuple = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=a__ , set_alpha_to_one=a__ , steps_offset=1 , prediction_type="epsilon" , thresholding=a__ , ) UpperCAmelCase__ : Any = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __a ( self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict=0 ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ ) UpperCAmelCase__ : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ ) # create init_image UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(a__ ) ).to(a__ ) UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase__ : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) ) # create mask UpperCAmelCase__ : Optional[Any] = np.ones((6_4, 6_4) , dtype=np.floataa ) UpperCAmelCase__ : Optional[Any] = 0 if str(a__ ).startswith("mps" ): UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(a__ ) else: UpperCAmelCase__ : int = torch.Generator(device=a__ ).manual_seed(a__ ) UpperCAmelCase__ : Any = { "prompt": "horse", "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 6_4, "width": 6_4, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = "cpu" UpperCAmelCase__ : Optional[Any] = self.get_dummy_components() UpperCAmelCase__ : Optional[int] = self.pipeline_class(**a__ ) UpperCAmelCase__ : Tuple = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) UpperCAmelCase__ : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) ) UpperCAmelCase__ : Optional[Any] = output.images UpperCAmelCase__ : Union[str, Any] = pipe( **self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0] UpperCAmelCase__ : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] print(f'image.shape {image.shape}' ) assert image.shape == (1, 6_4, 6_4, 3) UpperCAmelCase__ : List[str] = np.array( [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' def __a ( self : List[Any] ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : int ): '''simple docstring''' # clean up the VRAM after each 
test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) UpperCAmelCase__ : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) UpperCAmelCase__ : Any = np.ones((7_6_8, 7_6_8) , dtype=np.floataa ) UpperCAmelCase__ : Any = 0 UpperCAmelCase__ : Tuple = "a hat" UpperCAmelCase__ : List[str] = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(a__ ) UpperCAmelCase__ : Optional[int] = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa ) UpperCAmelCase__ : List[str] = pipeline.to(a__ ) pipeline.set_progress_bar_config(disable=a__ ) UpperCAmelCase__ : str = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ , UpperCAmelCase__ : Tuple = pipe_prior( a__ , generator=a__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCAmelCase__ : Any = pipeline( a__ , image=a__ , mask_image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="np" , ) UpperCAmelCase__ : Dict = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(a__ , a__ )
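Both Kandinsky tests build inpainting masks the same way: a float array of ones with a zeroed region. A small sketch of that convention; the zeroed rectangle below is illustrative, since the original slice is not recoverable from this text.

import numpy as np

# 1.0 = keep the pixel, 0.0 = region to be inpainted.
mask = np.ones((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 0  # illustrative rectangle to repaint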
360
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
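A standalone restatement of the variant helper defined above (its name is obfuscated in this listing), with a usage check showing that the variant tag is spliced in before the final extension:

from typing import Optional


def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name


assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant("model.safetensors") == "model.safetensors"  # no variant: unchanged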
298
0
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( """The `image_to_image.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionImg2ImgPipeline` instead.""" )
361
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy() UpperCAmelCase__ : List[Any] = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
298
0
"""simple docstring""" from __future__ import annotations from math import ceil, floor, sqrt def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] = 200_0000 )-> int: '''simple docstring''' UpperCAmelCase__ : Tuple = [0] UpperCAmelCase__ : Dict = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target UpperCAmelCase__ : Any = 0 # the area corresponding to the grid that gives the product closest to target UpperCAmelCase__ : List[Any] = 0 # an estimate of b, using the quadratic formula UpperCAmelCase__ : Dict = 42 # the largest integer less than b_estimate UpperCAmelCase__ : int = 42 # the largest integer less than b_estimate UpperCAmelCase__ : Any = 42 # the triangle number corresponding to b_floor UpperCAmelCase__ : Tuple = 42 # the triangle number corresponding to b_ceil UpperCAmelCase__ : List[Any] = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): UpperCAmelCase__ : Optional[int] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 UpperCAmelCase__ : Union[str, Any] = floor(__snake_case ) UpperCAmelCase__ : Tuple = ceil(__snake_case ) UpperCAmelCase__ : Optional[Any] = triangle_numbers[b_floor] UpperCAmelCase__ : List[Any] = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): UpperCAmelCase__ : Dict = triangle_b_first_guess * triangle_a UpperCAmelCase__ : Optional[int] = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): UpperCAmelCase__ : int = triangle_b_second_guess * triangle_a UpperCAmelCase__ : List[str] = idx_a * b_ceil return area if __name__ == "__main__": print(F"""{solution() = }""")
362
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : int = seq_length UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : Union[str, Any] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Any = embedding_size UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : List[Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : Union[str, Any] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Tuple = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : Union[str, Any] = scope def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Optional[int] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : int = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = AlbertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self 
: Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_labels UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = self.num_labels UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_choices UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Tuple = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =True def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ): '''simple docstring''' UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , 
snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Dict = type self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" ) UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case__ ) UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
298
0
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py _lowerCAmelCase : Union[str, Any] = """.""" # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) _lowerCAmelCase : int = [ """Assert""", """AssignVariableOp""", """EmptyTensorList""", """MergeV2Checkpoints""", """ReadVariableOp""", """ResourceGather""", """RestoreV2""", """SaveV2""", """ShardedFilename""", """StatefulPartitionedCall""", """StaticRegexFullMatch""", """VarHandleOp""", ] def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : str , snake_case : List[str] )-> List[str]: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = SavedModel() UpperCAmelCase__ : Union[str, Any] = [] with open(os.path.join(UpperCAmelCase__ , "utils" , "tf_ops" , "onnx.json" ) ) as f: UpperCAmelCase__ : Dict = json.load(UpperCAmelCase__ )["opsets"] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(UpperCAmelCase__ )] ) with open(UpperCAmelCase__ , "rb" ) as f: saved_model.ParseFromString(f.read() ) UpperCAmelCase__ : Optional[int] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want UpperCAmelCase__ : Any = sorted(UpperCAmelCase__ ) UpperCAmelCase__ : List[str] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(UpperCAmelCase__ ) if strict and len(UpperCAmelCase__ ) > 0: raise Exception(f'Found the following incompatible ops for the opset {opset}:\n' + incompatible_ops ) elif len(UpperCAmelCase__ ) > 0: print(f'Found the following incompatible ops for the opset {opset}:' ) print(*UpperCAmelCase__ , sep="\n" ) else: print(f'The saved model {saved_model_path} can properly be converted with ONNX.' ) if __name__ == "__main__": _lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""") parser.add_argument( """--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested.""" ) parser.add_argument( """--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model.""" ) parser.add_argument( """--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)""" ) _lowerCAmelCase : Optional[Any] = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
363
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = list(range(snake_case ) ) # Find permutation while factorials: UpperCAmelCase__ : str = factorials.pop() UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
298
0
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase__ ( snake_case__ ): SCREAMING_SNAKE_CASE_ =["""image_processor""", """tokenizer"""] SCREAMING_SNAKE_CASE_ ="""Pix2StructImageProcessor""" SCREAMING_SNAKE_CASE_ =("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = False super().__init__(_A , _A ) def __call__( self : str , snake_case__ : int=None , snake_case__ : Optional[int] = None , snake_case__ : Tuple = True , snake_case__ : Tuple = False , snake_case__ : Optional[Any] = None , snake_case__ : str = None , snake_case__ : Tuple = 2_0_4_8 , snake_case__ : Tuple = 0 , snake_case__ : List[str] = None , snake_case__ : Any = None , snake_case__ : Any = False , snake_case__ : Tuple = False , snake_case__ : int = False , snake_case__ : Tuple = False , snake_case__ : str = False , snake_case__ : Dict = True , snake_case__ : Union[str, Any] = None , **snake_case__ : str , ): '''simple docstring''' if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None and not self.image_processor.is_vqa: UpperCAmelCase__ : Union[str, Any] = self.tokenizer UpperCAmelCase__ : str = self.tokenizer( text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values UpperCAmelCase__ : Optional[int] = self.image_processor( _A , return_tensors=_A , max_patches=_A , **_A ) else: # add pixel_values and bbox UpperCAmelCase__ : Any = self.image_processor( _A , return_tensors=_A , max_patches=_A , header_text=_A , **_A ) if text is not None and not self.image_processor.is_vqa: UpperCAmelCase__ : str = self.tokenizer( text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , ) if "attention_mask" in text_encoding: UpperCAmelCase__ : Optional[int] = text_encoding.pop("attention_mask" ) if "input_ids" in text_encoding: UpperCAmelCase__ : Optional[int] = text_encoding.pop("input_ids" ) else: UpperCAmelCase__ : List[str] = None if text_encoding is not None: encoding_image_processor.update(_A ) return encoding_image_processor def __a ( self : str , *snake_case__ : int , **snake_case__ : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*_A , **_A ) def __a ( self : List[Any] , *snake_case__ : Tuple , **snake_case__ : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*_A , **_A ) @property def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.tokenizer.model_input_names UpperCAmelCase__ : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _lowerCAmelCase : Union[str, Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0} UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Any = image_size UpperCAmelCase__ : int = min_resolution UpperCAmelCase__ : Tuple = max_resolution UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : Optional[int] = do_normalize UpperCAmelCase__ : str = do_convert_rgb UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def __a ( self : str ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = PixaStructImageProcessingTester(self ) @property def __a ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ : Dict = 2_0_4_8 UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : 
Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase__ : Optional[int] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(snake_case__ ): UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches UpperCAmelCase__ : Optional[Any] = "Hello" UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Dict ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Dict = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : List[str] = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Optional[int] ): 
'''simple docstring''' # Initialize image_processor UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase__ : Optional[int] = 3 @property def __a ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : int ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
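# Worked check of the `expected_hidden_dim` used throughout the tests above: every
# flattened Pix2Struct patch carries patch_h * patch_w * num_channels pixel values
# plus 2 extra slots for the patch's row and column index. The numbers mirror the
# tester defaults in this file (16x16 patches, 3 channels).
patch_h, patch_w, num_channels = 16, 16, 3
expected_hidden_dim = patch_h * patch_w * num_channels + 2
assert expected_hidden_dim == 770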
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowerCAmelCase : Optional[int] = 16 _lowerCAmelCase : Optional[int] = 32 def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : int = 16 )-> Dict: '''simple docstring''' UpperCAmelCase__ : str = AutoTokenizer.from_pretrained("bert-base-cased" ) UpperCAmelCase__ : str = load_dataset("glue" , "mrpc" ) def tokenize_function(snake_case : Dict ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase__ : Optional[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase__ : List[str] = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase__ : Dict = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(snake_case : Tuple ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase__ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase__ : str = 16 elif accelerator.mixed_precision != "no": UpperCAmelCase__ : Dict = 8 else: UpperCAmelCase__ : Union[str, Any] = None return tokenizer.pad( snake_case__ , padding="longest" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="pt" , ) # Instantiate dataloaders. 
UpperCAmelCase__ : Optional[int] = DataLoader( tokenized_datasets["train"] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) UpperCAmelCase__ : List[Any] = DataLoader( tokenized_datasets["validation"] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowerCAmelCase : Optional[int] = mocked_dataloaders # noqa: F811 def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : List[Any] )-> List[str]: '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , snake_case__ ) == "1": UpperCAmelCase__ : List[str] = 2 # New Code # UpperCAmelCase__ : Optional[int] = int(args.gradient_accumulation_steps ) UpperCAmelCase__ : List[str] = int(args.local_sgd_steps ) # Initialize accelerator UpperCAmelCase__ : Optional[Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=snake_case__ ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase__ : Union[str, Any] = config['lr'] UpperCAmelCase__ : Optional[int] = int(config["num_epochs"] ) UpperCAmelCase__ : List[Any] = int(config["seed"] ) UpperCAmelCase__ : List[str] = int(config["batch_size"] ) UpperCAmelCase__ : Optional[Any] = evaluate.load("glue" , "mrpc" ) set_seed(snake_case__ ) UpperCAmelCase__ : int = get_dataloaders(snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase__ : Tuple = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase__ : Dict = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase__ : List[str] = AdamW(params=model.parameters() , lr=snake_case__ ) # Instantiate scheduler UpperCAmelCase__ : Any = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase__ : str = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() with LocalSGD( accelerator=snake_case__ , model=snake_case__ , local_sgd_steps=snake_case__ , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(snake_case__ ): UpperCAmelCase__ : int = model(**snake_case__ ) UpperCAmelCase__ : Union[str, Any] = output.loss accelerator.backward(snake_case__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ : Dict = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits.argmax(dim=-1 ) UpperCAmelCase__ : Any = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) UpperCAmelCase__ : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' UpperCAmelCase__ : Any = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=snake_case__ , default=snake_case__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=snake_case__ , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument( "--local_sgd_steps" , type=snake_case__ , default=8 , help="Number of local SGD steps or None to disable local SGD" ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) UpperCAmelCase__ : str = parser.parse_args() UpperCAmelCase__ : Tuple = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
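# How this example is typically launched (a sketch: the file name is assumed, and the
# flag values shown are simply the defaults defined in `main()` above):
#
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 1
#
# `accelerate launch` is the standard CLI entry point of the `accelerate` library and
# picks up the distributed configuration previously created with `accelerate config`.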
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : int = "mock-s3-bucket" UpperCAmelCase__ : Any = f's3://{mock_bucket}' UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case ) assert dataset_path.startswith("s3://" ) is False UpperCAmelCase__ : str = "./local/path" UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is True UpperCAmelCase__ : str = fsspec.filesystem("file" ) UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol] if input_path is None: UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case ) UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." 
)] assert fs.glob("*" ) == [expected_filename] with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} UpperCAmelCase__ : int = compressed_file_paths[protocol] UpperCAmelCase__ : Any = "dataset.jsonl" UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}' UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case ) UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(snake_case ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
"""simple docstring""" from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCAmelCase__ : def __init__( self : List[Any] , snake_case__ : Dict , snake_case__ : Any=1_3 , snake_case__ : Any=3_0 , snake_case__ : Union[str, Any]=2 , snake_case__ : List[Any]=3 , snake_case__ : Optional[int]=True , snake_case__ : List[str]=True , snake_case__ : Any=3_2 , snake_case__ : Optional[Any]=2 , snake_case__ : Union[str, Any]=4 , snake_case__ : Optional[int]=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : Dict=1_0 , snake_case__ : Optional[Any]=0.02 , snake_case__ : Optional[Any]=3 , snake_case__ : Optional[Any]=0.6 , snake_case__ : int=None , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : Union[str, Any] = batch_size UpperCAmelCase__ : Optional[int] = image_size UpperCAmelCase__ : int = patch_size UpperCAmelCase__ : Any = num_channels UpperCAmelCase__ : Optional[int] = is_training UpperCAmelCase__ : int = use_labels UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Any = num_hidden_layers UpperCAmelCase__ : Tuple = num_attention_heads UpperCAmelCase__ : str = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase__ : Dict = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : str = initializer_range UpperCAmelCase__ : List[str] = mask_ratio UpperCAmelCase__ : Union[str, Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCAmelCase__ : Optional[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Union[str, Any] = None if self.use_labels: UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : int = self.get_config() return config, pixel_values, labels def __a ( self : Any ): '''simple docstring''' return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def __a ( self : str , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = TFViTMAEModel(config=snake_case__ ) UpperCAmelCase__ : str = model(snake_case__ , training=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Union[str, Any] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = TFViTMAEForPreTraining(snake_case__ ) UpperCAmelCase__ : int = model(snake_case__ , training=snake_case__ ) # expected sequence length = num_patches UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2 UpperCAmelCase__ : Tuple = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCAmelCase__ : Union[str, Any] = 1 UpperCAmelCase__ : str = TFViTMAEForPreTraining(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : str = model(snake_case__ , training=snake_case__ ) UpperCAmelCase__ : int = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : str = config_and_inputs UpperCAmelCase__ : List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {} SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = TFViTMAEModelTester(self ) UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds" ) def __a ( self : Union[str, Any] ): '''simple docstring''' pass def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Any = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCAmelCase__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Layer ) ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[int] = model_class(snake_case__ ) UpperCAmelCase__ : Optional[int] = inspect.signature(model.call ) # signature.parameters is 
an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def __a ( self : Any ): '''simple docstring''' # make the mask reproducible np.random.seed(2 ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase__ : str = model_class(snake_case__ ) UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ , noise=snake_case__ ) UpperCAmelCase__ : Union[str, Any] = copy.deepcopy(self._prepare_for_class(snake_case__ , snake_case__ ) ) UpperCAmelCase__ : Optional[int] = model(**snake_case__ , noise=snake_case__ ) UpperCAmelCase__ : Tuple = outputs_dict[0].numpy() UpperCAmelCase__ : List[Any] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 ) def __a ( self : Union[str, Any] ): '''simple docstring''' # make the mask reproducible np.random.seed(2 ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(snake_case__ : Tuple ): UpperCAmelCase__ : Optional[int] = {} for k, v in inputs_dict.items(): if tf.is_tensor(snake_case__ ): UpperCAmelCase__ : List[str] = v.numpy() else: UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) return inputs_np_dict for model_class in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class(snake_case__ ) UpperCAmelCase__ : Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ ) UpperCAmelCase__ : str = prepare_numpy_arrays(snake_case__ ) UpperCAmelCase__ : Tuple = model(snake_case__ , noise=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ , noise=snake_case__ ) self.assert_outputs_same(snake_case__ , snake_case__ ) def __a ( self : Tuple , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Union[str, Any] ): '''simple docstring''' # make masks reproducible np.random.seed(2 ) UpperCAmelCase__ : Tuple = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCAmelCase__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase__ : List[str] = tf.constant(snake_case__ ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCAmelCase__ : Any = tf_noise super().check_pt_tf_models(snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' # make mask reproducible np.random.seed(2 ) UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Tuple = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(snake_case__ ) if module_member_name.endswith("MainLayer" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )] for module_member in (getattr(snake_case__ , snake_case__ ),) if isinstance(snake_case__ , snake_case__ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(snake_case__ , "_keras_serializable" , snake_case__ ) } UpperCAmelCase__ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase__ : Tuple = tf.convert_to_tensor(snake_case__ ) inputs_dict.update({"noise": noise} ) for main_layer_class in tf_main_layer_classes: UpperCAmelCase__ : Union[str, Any] = main_layer_class(snake_case__ ) UpperCAmelCase__ : List[str] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCAmelCase__ : str = tf.keras.Model(snake_case__ , outputs=main_layer(snake_case__ ) ) UpperCAmelCase__ : Dict = model(snake_case__ ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase__ : List[Any] = os.path.join(snake_case__ , "keras_model.h5" ) model.save(snake_case__ ) UpperCAmelCase__ : int = tf.keras.models.load_model( snake_case__ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(snake_case__ , tf.keras.Model ) UpperCAmelCase__ : int = model(snake_case__ ) self.assert_outputs_same(snake_case__ , snake_case__ ) @slow def __a ( self : Dict ): '''simple docstring''' # make mask reproducible np.random.seed(2 ) UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : int = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase__ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : Any = self._prepare_for_class(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ , noise=snake_case__ ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase__ : Union[str, Any] = outputs.last_hidden_state.numpy() UpperCAmelCase__ : List[Any] = 0 else: UpperCAmelCase__ : Optional[Any] = outputs.logits.numpy() UpperCAmelCase__ : Optional[int] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case__ , saved_model=snake_case__ ) UpperCAmelCase__ : str = model_class.from_pretrained(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , noise=snake_case__ ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase__ : Optional[Any] = after_outputs["last_hidden_state"].numpy() UpperCAmelCase__ : List[Any] = 0 else: UpperCAmelCase__ : Optional[int] = after_outputs["logits"].numpy() UpperCAmelCase__ : Any = 0 
UpperCAmelCase__ : Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(snake_case__ , 1e-5 ) def __a ( self : Union[str, Any] ): '''simple docstring''' # make mask reproducible np.random.seed(2 ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[int] = model_class(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ , noise=snake_case__ ) UpperCAmelCase__ : Optional[int] = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(snake_case__ ) UpperCAmelCase__ : List[Any] = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCAmelCase__ : Tuple = model_class.from_config(model.config ) UpperCAmelCase__ : int = new_model(snake_case__ ) # Build model new_model.set_weights(model.get_weights() ) UpperCAmelCase__ : str = new_model(snake_case__ , noise=snake_case__ ) self.assert_outputs_same(snake_case__ , snake_case__ ) @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def __a ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" ) def __a ( self : int ): '''simple docstring''' pass @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Any = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: UpperCAmelCase__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : List[str] ): '''simple docstring''' return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None @slow def __a ( self : List[Any] ): '''simple docstring''' # make random mask reproducible across the PT and TF model np.random.seed(2 ) UpperCAmelCase__ : int = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ) UpperCAmelCase__ : Union[str, Any] = self.default_image_processor UpperCAmelCase__ : Tuple = prepare_img() UpperCAmelCase__ : List[str] = image_processor(images=snake_case__ , return_tensors="tf" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCAmelCase__ : Union[str, Any] = ViTMAEConfig() UpperCAmelCase__ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCAmelCase__ : int = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCAmelCase__ : List[str] = model(**snake_case__ , noise=snake_case__ ) # verify the logits UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor([1, 1_9_6, 7_6_8] ) self.assertEqual(outputs.logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[int] = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , 
snake_case__ , atol=1e-4 )
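# Worked check of the masked sequence length computed by the model tester above: with
# the tester defaults (image_size=30, patch_size=2, mask_ratio=0.6), ViTMAE keeps
# ceil((1 - mask_ratio) * (num_patches + 1)) tokens, the +1 being the [CLS] token.
import math

num_patches = (30 // 2) ** 2  # 225 patches
seq_length = int(math.ceil((1 - 0.6) * (num_patches + 1)))
assert seq_length == 91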
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''megatron-bert''' def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Tuple = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Any = max_position_embeddings UpperCAmelCase__ : Dict = type_vocab_size UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : int = layer_norm_eps UpperCAmelCase__ : Optional[Any] = position_embedding_type UpperCAmelCase__ : Any = use_cache
"""simple docstring""" import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =DownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""down""" def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =ResnetDownsampleBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""down""" def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =AttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""down""" def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =CrossAttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""down""" def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Any = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase__ : Optional[Any] = 3_2 return init_dict, inputs_dict def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =SimpleCrossAttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""down""" @property def __a ( self : List[Any] ): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=__lowercase ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : str = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase__ : str = 3_2 return init_dict, inputs_dict @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =SkipDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""down""" @property def __a ( self : int ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=__lowercase ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =AttnSkipDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""down""" @property def __a ( self : Optional[Any] ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=__lowercase ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ 
=DownEncoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""down""" @property def __a ( self : Dict ): '''simple docstring''' return super().get_dummy_input(include_temb=__lowercase ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''in_channels''': 3_2, '''out_channels''': 3_2, } UpperCAmelCase__ : List[Any] = self.dummy_input return init_dict, inputs_dict def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =AttnDownEncoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""down""" @property def __a ( self : Union[str, Any] ): '''simple docstring''' return super().get_dummy_input(include_temb=__lowercase ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = { '''in_channels''': 3_2, '''out_channels''': 3_2, } UpperCAmelCase__ : List[Any] = self.dummy_input return init_dict, inputs_dict def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =UNetMidBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""mid""" def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = { '''in_channels''': 3_2, '''temb_channels''': 1_2_8, } UpperCAmelCase__ : Dict = self.dummy_input return init_dict, inputs_dict def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =UNetMidBlockaDCrossAttn # noqa F405 SCREAMING_SNAKE_CASE_ ="""mid""" def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase__ : Optional[Any] = 3_2 return init_dict, inputs_dict def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =UNetMidBlockaDSimpleCrossAttn # noqa F405 SCREAMING_SNAKE_CASE_ ="""mid""" @property def __a ( self : Optional[Any] ): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=__lowercase ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase__ : int = 3_2 return init_dict, inputs_dict def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =UpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""up""" @property def __a ( self : int ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523] super().test_output(__lowercase ) class 
lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =ResnetUpsampleBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""up""" @property def __a ( self : Dict ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =CrossAttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""up""" @property def __a ( self : List[str] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = 3_2 return init_dict, inputs_dict def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =SimpleCrossAttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""up""" @property def __a ( self : str ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase , include_encoder_hidden_states=__lowercase ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = super().prepare_init_args_and_inputs_for_common() UpperCAmelCase__ : int = 3_2 return init_dict, inputs_dict def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =AttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""up""" @property def __a ( self : Optional[int] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =SkipUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""up""" @property def __a ( self : Dict ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =AttnSkipUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""up""" @property def __a ( self : int ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =UpDecoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""up""" @property def __a ( self : int 
): '''simple docstring''' return super().get_dummy_input(include_temb=__lowercase ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = {'''in_channels''': 3_2, '''out_channels''': 3_2} UpperCAmelCase__ : List[Any] = self.dummy_input return init_dict, inputs_dict def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137] super().test_output(__lowercase ) class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =AttnUpDecoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ ="""up""" @property def __a ( self : Union[str, Any] ): '''simple docstring''' return super().get_dummy_input(include_temb=__lowercase ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = {'''in_channels''': 3_2, '''out_channels''': 3_2} UpperCAmelCase__ : int = self.dummy_input return init_dict, inputs_dict def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568] super().test_output(__lowercase )
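# The block tests above all follow one pattern inherited from `UNetBlockTesterMixin`:
# instantiate the block from `prepare_init_args_and_inputs_for_common()`, run a
# forward pass on dummy inputs, and compare a fixed 9-value slice of the output
# against the hard-coded `expected_slice`. A hedged sketch of that comparison (the
# exact slice indices are illustrative, not the mixin's verified internals):
#
# output = block(**dummy_inputs)
# output_slice = output[0, -1, -3:, -3:].flatten()
# assert torch.allclose(output_slice, torch.tensor(expected_slice), atol=5e-3)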
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Dict ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , ) def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )] def __a ( self : Any , snake_case__ : str , snake_case__ : str ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Any ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} ) ] def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )] def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )] class lowerCAmelCase__ ( __magic_name__ ): @require_beam def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : Dict ): '''simple docstring''' import apache_beam as beam UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock: UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , 
"default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Dict = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _lowerCAmelCase : Any = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCamelCase__ ): SCREAMING_SNAKE_CASE_ =["""input_values""", """padding_mask"""] def __init__( self : Tuple , snake_case__ : int = 1 , snake_case__ : int = 2_4_0_0_0 , snake_case__ : float = 0.0 , snake_case__ : float = None , snake_case__ : float = None , **snake_case__ : str , ): '''simple docstring''' super().__init__(feature_size=__a , sampling_rate=__a , padding_value=__a , **__a ) UpperCAmelCase__ : List[Any] = chunk_length_s UpperCAmelCase__ : Optional[int] = overlap @property def __a ( self : Any ): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __a ( self : List[Any] ): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self : int , snake_case__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case__ : Optional[Union[bool, str, PaddingStrategy]] = None , snake_case__ : Optional[bool] = False , snake_case__ : Optional[int] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : Optional[int] = None , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one." 
) elif padding is None: # by default let's pad the inputs UpperCAmelCase__ : List[str] = True UpperCAmelCase__ : Tuple = bool( isinstance(__a , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : int = [np.asarray(__a , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__a , np.ndarray ): UpperCAmelCase__ : List[Any] = np.asarray(__a , dtype=np.floataa ) elif isinstance(__a , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : Union[str, Any] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : List[Any] = [np.asarray(__a ).T] # verify inputs are valid for idx, example in enumerate(__a ): if example.ndim > 2: raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels' ) UpperCAmelCase__ : List[str] = None UpperCAmelCase__ : str = BatchFeature({"input_values": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: UpperCAmelCase__ : int = min(array.shape[0] for array in raw_audio ) UpperCAmelCase__ : str = int(np.floor(max_length / self.chunk_stride ) ) UpperCAmelCase__ : Optional[int] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: UpperCAmelCase__ : List[Any] = max(array.shape[0] for array in raw_audio ) UpperCAmelCase__ : Any = int(np.ceil(max_length / self.chunk_stride ) ) UpperCAmelCase__ : Tuple = (nb_step - 1) * self.chunk_stride + self.chunk_length UpperCAmelCase__ : List[Any] = "max_length" else: UpperCAmelCase__ : Any = input_values # normal padding on batch if padded_inputs is None: UpperCAmelCase__ : List[str] = self.pad( __a , max_length=__a , truncation=__a , padding=__a , return_attention_mask=__a , ) if padding: UpperCAmelCase__ : int = padded_inputs.pop("attention_mask" ) UpperCAmelCase__ : Union[str, Any] = [] for example in padded_inputs.pop("input_values" ): if self.feature_size == 1: UpperCAmelCase__ : str = example[..., None] input_values.append(example.T ) UpperCAmelCase__ : Tuple = input_values if return_tensors is not None: UpperCAmelCase__ : Optional[Any] = padded_inputs.convert_to_tensors(__a ) return padded_inputs
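# A minimal, hedged usage sketch for the feature extractor above (this class
# ships in `transformers` as EncodecFeatureExtractor). The 440 Hz test tone and
# the chunking parameters are illustrative assumptions, not part of the module.
if __name__ == "__main__":
    extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000, chunk_length_s=1.0, overlap=0.01)
    # one second of mono audio at 24 kHz (hypothetical test signal)
    audio = np.sin(2 * np.pi * 440.0 * np.linspace(0.0, 1.0, 24_000)).astype(np.float32)
    inputs = extractor(raw_audio=audio, sampling_rate=24_000, return_tensors="np")
    print(inputs["input_values"].shape)  # (1, 1, padded_length)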
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =XLMTokenizer SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : Optional[int] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(snake_case__ ) ) def __a ( self : Union[str, Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = "lower newer" UpperCAmelCase__ : Optional[Any] = "lower newer" return input_text, output_text def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : List[Any] = "lower" UpperCAmelCase__ : Any = ["low", "er</w>"] UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"] UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
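# A short, hedged sketch of the attribute aliasing and the property defined
# above; the hyperparameter values are illustrative, not a released checkpoint.
if __name__ == "__main__":
    config = XLNetConfig(d_model=768, n_layer=12, n_head=12)
    print(config.hidden_size)              # 768, resolved through attribute_map
    print(config.max_position_embeddings)  # -1: XLNet has no sequence length limit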
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ): '''simple docstring''' UpperCAmelCase__ : Any = "bilinear" UpperCAmelCase__ : Any = max_size UpperCAmelCase__ : Any = short_edge_length def __call__( self : Dict , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for img in imgs: UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ ) if h < w: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w else: UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size if max(snake_case__ , snake_case__ ) > self.max_size: UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = newh * scale UpperCAmelCase__ : int = neww * scale UpperCAmelCase__ : List[Any] = int(neww + 0.5 ) UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase__ : Any = Image.fromarray(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ ) else: UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase__ : Tuple = nn.functional.interpolate( snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 ) img_augs.append(snake_case__ ) return img_augs class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase__ : Any = cfg.INPUT.FORMAT UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY UpperCAmelCase__ : str = cfg.PAD_VALUE UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std def __a ( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images] UpperCAmelCase__ : int = [ nn.functional.pad( snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case__ , snake_case__ ) ] return torch.stack(snake_case__ ), torch.tensor(snake_case__ ) def __call__( self : str , snake_case__ : int , snake_case__ : int=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [images] if single_image: assert len(snake_case__ ) == 1 for i in range(len(snake_case__ ) ): if 
isinstance(images[i] , torch.Tensor ): images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase__ : Tuple = self.aug(snake_case__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images] # now pad them to do the following operations UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int: '''simple docstring''' assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size tensor[:, 0].clamp_(min=0 , max=snake_case ) tensor[:, 1].clamp_(min=0 , max=snake_case ) tensor[:, 2].clamp_(min=0 , max=snake_case ) tensor[:, 3].clamp_(min=0 , max=snake_case )
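# A hedged usage sketch for ResizeShortestEdge above; Preprocess is skipped here
# because it needs a full detectron2-style `cfg` object that this file does not
# define. The image size and edge lengths below are illustrative assumptions.
if __name__ == "__main__":
    demo_img = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    resize = ResizeShortestEdge(short_edge_length=[800, 800], max_size=1333)
    resized = resize([demo_img])[0]
    print(resized.shape)  # (800, 1067, 3): short edge -> 800, aspect ratio kept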
"""simple docstring""" import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Union[str, Any] )-> Any: '''simple docstring''' UpperCAmelCase__ : Tuple = torch.load(__SCREAMING_SNAKE_CASE , map_location="cpu" ) UpperCAmelCase__ : int = chkpt["model"] # We have the base model one level deeper than the original XLM repository UpperCAmelCase__ : Optional[int] = {} for k, v in state_dict.items(): if "pred_layer" in k: UpperCAmelCase__ : Any = v else: UpperCAmelCase__ : List[str] = v UpperCAmelCase__ : Optional[Any] = chkpt["params"] UpperCAmelCase__ : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__SCREAMING_SNAKE_CASE , (torch.FloatTensor, numpy.ndarray) )} UpperCAmelCase__ : Optional[Any] = chkpt["dico_word2id"] UpperCAmelCase__ : Optional[int] = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()} # Save pytorch-model UpperCAmelCase__ : Union[str, Any] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME UpperCAmelCase__ : List[Any] = pytorch_dump_folder_path + "/" + CONFIG_NAME UpperCAmelCase__ : Dict = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"] print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(__SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__SCREAMING_SNAKE_CASE , indent=2 ) + "\n" ) print(f'Save vocab file to {pytorch_config_dump_path}' ) with open(__SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__SCREAMING_SNAKE_CASE , indent=2 ) + "\n" ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCAmelCase : Any = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring""" import qiskit def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts: '''simple docstring''' UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" ) UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = half_adder(1, 1) print(F"""Half Adder Output Qubit Counts: {counts}""")
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : def __init__( self : int , snake_case__ : int , snake_case__ : str=1_3 , snake_case__ : int=3_2 , snake_case__ : Union[str, Any]=2 , snake_case__ : int=3 , snake_case__ : List[str]=1_6 , snake_case__ : List[str]=[1, 2, 1] , snake_case__ : List[Any]=[2, 2, 4] , snake_case__ : List[str]=2 , snake_case__ : Union[str, Any]=2.0 , snake_case__ : List[Any]=True , snake_case__ : List[str]=0.0 , snake_case__ : str=0.0 , snake_case__ : Optional[Any]=0.1 , snake_case__ : str="gelu" , snake_case__ : List[str]=False , snake_case__ : Tuple=True , snake_case__ : Tuple=0.02 , snake_case__ : Optional[int]=1e-5 , snake_case__ : Any=True , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[int]=True , snake_case__ : Any=1_0 , snake_case__ : Any=8 , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Any = image_size UpperCAmelCase__ : Dict = patch_size UpperCAmelCase__ : str = num_channels UpperCAmelCase__ : Dict = embed_dim UpperCAmelCase__ : Union[str, Any] = depths UpperCAmelCase__ : Any = num_heads UpperCAmelCase__ : Union[str, Any] = window_size UpperCAmelCase__ : str = mlp_ratio UpperCAmelCase__ : Tuple = qkv_bias UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase__ : str = drop_path_rate UpperCAmelCase__ : Union[str, Any] = hidden_act UpperCAmelCase__ : str = use_absolute_embeddings UpperCAmelCase__ : Any = patch_norm UpperCAmelCase__ : Union[str, Any] = layer_norm_eps UpperCAmelCase__ : Tuple = initializer_range UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : Union[str, Any] = scope UpperCAmelCase__ : List[Any] = use_labels UpperCAmelCase__ : List[Any] = type_sequence_label_size UpperCAmelCase__ : Tuple = encoder_stride def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Union[str, Any] = None if self.use_labels: UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def __a ( self : List[Any] ): '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , 
hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = SwinvaModel(config=__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase__ : Optional[Any] = model(__snake_case ) UpperCAmelCase__ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def __a ( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Tuple = SwinvaForMaskedImageModeling(config=__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase__ : Optional[int] = model(__snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase__ : Optional[Any] = 1 UpperCAmelCase__ : int = SwinvaForMaskedImageModeling(__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.type_sequence_label_size UpperCAmelCase__ : Optional[int] = SwinvaForImageClassification(__snake_case ) model.to(__snake_case ) model.eval() UpperCAmelCase__ : Optional[Any] = model(__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = config_and_inputs UpperCAmelCase__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( {'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = SwinvaModelTester(self ) UpperCAmelCase__ : int = ConfigTester(self , config_class=__snake_case , embed_dim=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." ) def __a ( self : str ): '''simple docstring''' pass @unittest.skip(reason="Swinv2 does not use inputs_embeds" ) def __a ( self : int ): '''simple docstring''' pass def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class(__snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) ) def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class(__snake_case ) UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase__ : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __snake_case ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : int = True for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[Any] = True UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : List[Any] = True UpperCAmelCase__ : Dict = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(__snake_case , __snake_case ) ) UpperCAmelCase__ : Union[str, Any] = outputs.attentions UpperCAmelCase__ : Dict = len(self.model_tester.depths ) self.assertEqual(len(__snake_case ) , __snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : Optional[Any] = config.window_size**2 UpperCAmelCase__ : List[Any] = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__snake_case , __snake_case ) ) UpperCAmelCase__ : int = outputs.attentions self.assertEqual(len(__snake_case ) , __snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) UpperCAmelCase__ : Dict = len(__snake_case ) # Check attention is always last and order is fine UpperCAmelCase__ : Dict = True UpperCAmelCase__ : int = True UpperCAmelCase__ : List[Any] = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): UpperCAmelCase__ : Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) ) if hasattr(self.model_tester , "num_hidden_states_types" ): UpperCAmelCase__ : Dict = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states UpperCAmelCase__ : Optional[Any] = 2 self.assertEqual(out_len + added_hidden_states , len(__snake_case ) ) UpperCAmelCase__ : Dict = 
outputs.attentions self.assertEqual(len(__snake_case ) , __snake_case ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def __a ( self : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = model_class(__snake_case ) model.to(__snake_case ) model.eval() with torch.no_grad(): UpperCAmelCase__ : Any = model(**self._prepare_for_class(__snake_case , __snake_case ) ) UpperCAmelCase__ : List[Any] = outputs.hidden_states UpperCAmelCase__ : Optional[Any] = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__snake_case ) , __snake_case ) # Swinv2 has a different seq_length UpperCAmelCase__ : Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase__ : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) UpperCAmelCase__ : str = outputs.reshaped_hidden_states self.assertEqual(len(__snake_case ) , __snake_case ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = reshaped_hidden_states[0].shape UpperCAmelCase__ : str = ( reshaped_hidden_states[0].view(__snake_case , __snake_case , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[Any] = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ : List[str] = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case ) def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = 3 UpperCAmelCase__ : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase__ : int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase__ : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ : Tuple = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , 
(padded_height, padded_width) ) def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case ) @slow def __a ( self : Optional[int] ): '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Any = SwinvaModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : List[str] = _config_zero_init(__snake_case ) for model_class in self.all_model_classes: UpperCAmelCase__ : List[Any] = model_class(config=__snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @require_vision @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Any ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ) if is_vision_available() else None ) @slow def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to( __snake_case ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) UpperCAmelCase__ : Tuple = image_processor(images=__snake_case , return_tensors="pt" ).to(__snake_case ) # forward pass with torch.no_grad(): UpperCAmelCase__ : int = model(**__snake_case ) # verify the logits UpperCAmelCase__ : Optional[int] = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __snake_case ) UpperCAmelCase__ : Optional[Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''efficientformer''' def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[str] = hidden_sizes UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Optional[int] = patch_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : Optional[int] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Dict = downsamples UpperCAmelCase__ : Any = dim UpperCAmelCase__ : str = key_dim UpperCAmelCase__ : List[Any] = attention_ratio UpperCAmelCase__ : Optional[Any] = resolution UpperCAmelCase__ : Optional[Any] = pool_size UpperCAmelCase__ : Any = downsample_patch_size UpperCAmelCase__ : int = downsample_stride UpperCAmelCase__ : Dict = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[str] = distillation UpperCAmelCase__ : Dict = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Optional[int] = batch_norm_eps
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowerCAmelCase__ : def __init__( self : str , snake_case__ : List[Any] , snake_case__ : Union[str, Any]=1_3 , snake_case__ : Any=7 , snake_case__ : str=False , snake_case__ : Union[str, Any]=True , snake_case__ : int=False , snake_case__ : List[str]=False , snake_case__ : List[Any]=1_9 , snake_case__ : List[str]=3_2 , snake_case__ : int=5 , snake_case__ : Dict=4 , snake_case__ : str=3_7 , snake_case__ : Any="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : str=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=1_6 , snake_case__ : List[Any]=2 , snake_case__ : int=0.02 , snake_case__ : Optional[Any]=3 , snake_case__ : Tuple=4 , snake_case__ : List[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : Optional[Any] = seq_length UpperCAmelCase__ : List[Any] = is_training UpperCAmelCase__ : Tuple = use_input_mask UpperCAmelCase__ : Dict = use_token_type_ids UpperCAmelCase__ : Tuple = use_labels UpperCAmelCase__ : Optional[Any] = vocab_size UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : str = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Optional[Any] = max_position_embeddings UpperCAmelCase__ : Optional[Any] = type_vocab_size UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Optional[int] = num_labels UpperCAmelCase__ : Tuple = num_choices UpperCAmelCase__ : Optional[Any] = scope def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : List[Any] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[Any] = None UpperCAmelCase__ : str = None UpperCAmelCase__ : Union[str, Any] = None if self.use_labels: UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = EsmConfig( vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings 
, type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=snake_case__ , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , ) return config def __a ( self : Any , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Tuple = EsmForProteinFolding(config=snake_case__ ).float() model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ ) UpperCAmelCase__ : Tuple = model(snake_case__ ) UpperCAmelCase__ : List[str] = model(snake_case__ ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() ( UpperCAmelCase__ ) : List[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =(EsmForProteinFolding,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =() SCREAMING_SNAKE_CASE_ ={} if is_torch_available() else {} SCREAMING_SNAKE_CASE_ =False def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = EsmFoldModelTester(self ) UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) @unittest.skip("Does not support attention outputs" ) def __a ( self : Dict ): '''simple docstring''' pass @unittest.skip def __a ( self : Dict ): '''simple docstring''' pass @unittest.skip("Esm does not support embedding resizing" ) def __a ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip("Esm does not support embedding resizing" ) def __a ( self : Dict ): '''simple docstring''' pass @unittest.skip("ESMFold does not support passing input embeds!" ) def __a ( self : int ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __a ( self : Any ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __a ( self : List[Any] ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __a ( self : List[str] ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __a ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip("ESMFold does not support head pruning." ) def __a ( self : List[str] ): '''simple docstring''' pass @unittest.skip("ESMFold does not output hidden states in the normal way." ) def __a ( self : Dict ): '''simple docstring''' pass @unittest.skip("ESMfold does not output hidden states in the normal way." ) def __a ( self : List[Any] ): '''simple docstring''' pass @unittest.skip("ESMFold only has one output format." 
) def __a ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" ) def __a ( self : Any ): '''simple docstring''' pass @unittest.skip("ESMFold does not support input chunking." ) def __a ( self : Dict ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." ) def __a ( self : Any ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def __a ( self : Tuple ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def __a ( self : List[Any] ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def __a ( self : Optional[Any] ): '''simple docstring''' pass @unittest.skip("ESMFold doesn't support data parallel." ) def __a ( self : List[str] ): '''simple docstring''' pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __a ( self : Dict ): '''simple docstring''' pass @require_torch class lowerCAmelCase__ ( __magic_name__ ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float() model.eval() UpperCAmelCase__ : Tuple = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) UpperCAmelCase__ : Any = model(snake_case__ )["positions"] UpperCAmelCase__ : Tuple = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , snake_case__ , atol=1e-4 ) )
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
"""simple docstring""" from manim import * class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = Rectangle(height=0.5 , width=0.5 ) UpperCAmelCase__ : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )] UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )] UpperCAmelCase__ : Dict = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 ) UpperCAmelCase__ : Optional[Any] = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 ) UpperCAmelCase__ : Any = VGroup(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0 ) UpperCAmelCase__ : Union[str, Any] = Text("CPU" , font_size=2_4 ) UpperCAmelCase__ : Optional[int] = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(snake_case__ ) UpperCAmelCase__ : Dict = [mem.copy() for i in range(4 )] UpperCAmelCase__ : List[Any] = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 ) UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=2_4 ) UpperCAmelCase__ : Any = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ ) gpu.move_to([-1, -1, 0] ) self.add(snake_case__ ) UpperCAmelCase__ : List[str] = [mem.copy() for i in range(6 )] UpperCAmelCase__ : List[str] = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 ) UpperCAmelCase__ : Optional[int] = Text("Model" , font_size=2_4 ) UpperCAmelCase__ : List[Any] = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ ) model.move_to([3, -1.0, 0] ) self.add(snake_case__ ) UpperCAmelCase__ : Any = [] for i, rect in enumerate(snake_case__ ): rect.set_stroke(snake_case__ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) UpperCAmelCase__ : Tuple = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=snake_case__ , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=snake_case__ , buff=0.0 ) self.add(snake_case__ ) cpu_targs.append(snake_case__ ) UpperCAmelCase__ : Any = [mem.copy() for i in range(6 )] UpperCAmelCase__ : Tuple = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 ) UpperCAmelCase__ : List[str] = Text("Loaded Checkpoint" , font_size=2_4 ) UpperCAmelCase__ : Dict = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , aligned_edge=snake_case__ , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) UpperCAmelCase__ : Optional[int] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCAmelCase__ : List[str] = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = MarkupText( f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=1_8 , ) blue_text.next_to(snake_case__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) UpperCAmelCase__ : Optional[Any] = MarkupText( f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' 
, font_size=2_4 , ) step_a.move_to([2, 2, 0] ) self.play(Write(snake_case__ ) , Write(snake_case__ ) ) self.play(Write(snake_case__ , run_time=1 ) , Create(snake_case__ , run_time=1 ) ) UpperCAmelCase__ : Tuple = [] UpperCAmelCase__ : Optional[Any] = [] for i, rect in enumerate(snake_case__ ): UpperCAmelCase__ : List[str] = fill.copy().set_fill(snake_case__ , opacity=0.7 ) target.move_to(snake_case__ ) first_animations.append(GrowFromCenter(snake_case__ , run_time=1 ) ) UpperCAmelCase__ : Dict = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(snake_case__ , run_time=1.5 ) ) self.play(*snake_case__ ) self.play(*snake_case__ ) self.wait()
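A scene like the one above can also be rendered without the manim CLI; a small sketch assuming the Manim Community tempconfig/render API (the Demo scene is a placeholder, not the scene above):

from manim import Create, Scene, Square, tempconfig

class Demo(Scene):
    def construct(self):
        # draw a single square, just to exercise the render path
        self.play(Create(Square()))

with tempconfig({"quality": "low_quality", "preview": False}):
    Demo().render()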
351
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False 
SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), 
BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] 
= torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] 
= model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
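For reference, the classification path exercised by these tests corresponds to a short inference snippet; a sketch using the same public checkpoint the integration tests load (the local image path is hypothetical):

import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

image = Image.open("cats.png")  # hypothetical input image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])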
298
0
"""simple docstring""" import os import sys _lowerCAmelCase : str = os.path.join(os.path.dirname(__file__), """src""") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) _lowerCAmelCase : List[Any] = [ """torch""", """numpy""", """tokenizers""", """filelock""", """requests""", """tqdm""", """regex""", """sentencepiece""", """sacremoses""", """importlib_metadata""", """huggingface_hub""", ] @add_start_docstrings(AutoConfig.__doc__ ) def SCREAMING_SNAKE_CASE__ ( *snake_case : List[Any] , **snake_case : Any )-> Dict: '''simple docstring''' return AutoConfig.from_pretrained(*snake_case , **snake_case ) @add_start_docstrings(AutoTokenizer.__doc__ ) def SCREAMING_SNAKE_CASE__ ( *snake_case : Union[str, Any] , **snake_case : Dict )-> Optional[int]: '''simple docstring''' return AutoTokenizer.from_pretrained(*snake_case , **snake_case ) @add_start_docstrings(AutoModel.__doc__ ) def SCREAMING_SNAKE_CASE__ ( *snake_case : Optional[int] , **snake_case : str )-> Union[str, Any]: '''simple docstring''' return AutoModel.from_pretrained(*snake_case , **snake_case ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def SCREAMING_SNAKE_CASE__ ( *snake_case : str , **snake_case : str )-> Tuple: '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*snake_case , **snake_case ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def SCREAMING_SNAKE_CASE__ ( *snake_case : Optional[Any] , **snake_case : Tuple )-> Tuple: '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*snake_case , **snake_case ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def SCREAMING_SNAKE_CASE__ ( *snake_case : List[str] , **snake_case : Any )-> Dict: '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*snake_case , **snake_case ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def SCREAMING_SNAKE_CASE__ ( *snake_case : List[Any] , **snake_case : int )-> str: '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*snake_case , **snake_case )
352
"""simple docstring""" import functools def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int: '''simple docstring''' UpperCAmelCase__ : List[str] = len(snake_case ) UpperCAmelCase__ : str = len(snake_case ) @functools.cache def min_distance(snake_case : int , snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , snake_case ) , 1 + min_distance(snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
298
0
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin _lowerCAmelCase : List[Any] = """ Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] """ class lowerCAmelCase__ ( unittest.TestCase , __magic_name__ ): def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_tool("text-question-answering" ) self.tool.setup() UpperCAmelCase__ : Optional[Any] = load_tool("text-question-answering" , remote=snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.tool(snake_case__ , "What did Hugging Face do in April 2021?" ) self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.remote_tool(snake_case__ , "What did Hugging Face do in April 2021?" ) self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.tool(text=snake_case__ , question="What did Hugging Face do in April 2021?" ) self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.remote_tool(text=snake_case__ , question="What did Hugging Face do in April 2021?" ) self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
353
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : List[Any] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) UpperCAmelCase__ : Tuple = input_file.read() UpperCAmelCase__ : Tuple = regexp.search(snake_case__ ) return match def __a ( self : List[str] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) UpperCAmelCase__ : Dict = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase__ : int = regexp.finditer(snake_case__ ) UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Path("./datasets" ) UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(snake_case__ ) ): raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = Path("./datasets" ) UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(snake_case__ ) ): raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
298
0
"""simple docstring""" import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) _lowerCAmelCase : List[Any] = logging.getLogger(__name__) _lowerCAmelCase : List[str] = """Hello world! cécé herlolip""" _lowerCAmelCase : List[str] = namedtuple( """BertAbsConfig""", [ """temp_dir""", """large""", """use_bert_emb""", """finetune_bert""", """encoder""", """share_emb""", """max_pos""", """enc_layers""", """enc_hidden_size""", """enc_heads""", """enc_ff_size""", """enc_dropout""", """dec_layers""", """dec_hidden_size""", """dec_heads""", """dec_ff_size""", """dec_dropout""", ], ) def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : List[Any] )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : Dict = BertAbsConfig( temp_dir="." , finetune_bert=snake_case , large=snake_case , share_emb=snake_case , use_bert_emb=snake_case , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , ) UpperCAmelCase__ : Dict = torch.load(snake_case , lambda snake_case , snake_case : storage ) UpperCAmelCase__ : Optional[int] = AbsSummarizer(snake_case , torch.device("cpu" ) , snake_case ) original.eval() UpperCAmelCase__ : List[str] = BertAbsSummarizer(snake_case , torch.device("cpu" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("convert the model" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("Make sure that the models' outputs are identical" ) UpperCAmelCase__ : Optional[int] = BertTokenizer.from_pretrained("bert-base-uncased" ) # prepare the model inputs UpperCAmelCase__ : Union[str, Any] = tokenizer.encode("This is sample éàalj'-." ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case )) ) UpperCAmelCase__ : List[str] = torch.tensor(snake_case ).unsqueeze(0 ) UpperCAmelCase__ : List[Any] = tokenizer.encode("This is sample 3 éàalj'-." ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case )) ) UpperCAmelCase__ : Dict = torch.tensor(snake_case ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass UpperCAmelCase__ : Dict = encoder_input_ids UpperCAmelCase__ : str = decoder_input_ids UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : int = None UpperCAmelCase__ : List[str] = None UpperCAmelCase__ : Tuple = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical UpperCAmelCase__ : Optional[Any] = original(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )[0] UpperCAmelCase__ : int = original.generator(snake_case ) UpperCAmelCase__ : Any = new_model( snake_case , snake_case , snake_case , snake_case , snake_case )[0] UpperCAmelCase__ : Dict = new_model.generator(snake_case ) UpperCAmelCase__ : List[str] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("Maximum absolute difference between outputs: {:.2f}".format(snake_case ) ) UpperCAmelCase__ : Dict = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("Maximum absolute difference between outputs: {:.2f}".format(snake_case ) ) UpperCAmelCase__ : int = torch.allclose(snake_case , snake_case , atol=1E-3 ) if are_identical: logging.info("all outputs are equal up to 1e-3" ) else: raise ValueError("the outputs are different. The new model is likely different from the original one." ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("saving the model's state dictionary" ) torch.save( new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" ) if __name__ == "__main__": _lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--bertabs_checkpoint_path""", default=None, type=str, required=True, help="""Path to the official PyTorch dump.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""", ) _lowerCAmelCase : Tuple = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
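The output-equivalence check above follows a generic pattern for validating a weight conversion; a sketch with illustrative names (outputs_match is mine, not part of the script):

import torch

def outputs_match(model_a, model_b, example, atol=1e-3):
    # run both models on the same input and compare elementwise
    model_a.eval()
    model_b.eval()
    with torch.no_grad():
        out_a = model_a(example)
        out_b = model_b(example)
    max_diff = torch.max(torch.abs(out_a - out_b)).item()
    print("Maximum absolute difference between outputs: {:.2e}".format(max_diff))
    return torch.allclose(out_a, out_b, atol=atol)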
354
"""simple docstring""" import numpy as np import datasets _lowerCAmelCase : Optional[int] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ _lowerCAmelCase : Tuple = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ _lowerCAmelCase : Optional[int] = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ): '''simple docstring''' # convert to numpy arrays UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ ) UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T ) try: UpperCAmelCase__ : str = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ ) UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ ) UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
298
0
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : Tuple = logging.get_logger(__name__) _lowerCAmelCase : Tuple = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } _lowerCAmelCase : Any = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : str , snake_case : str , snake_case : Any , snake_case : Any )-> str: '''simple docstring''' for attribute in key.split("." ): UpperCAmelCase__ : int = getattr(snake_case , snake_case ) if weight_type is not None: UpperCAmelCase__ : List[Any] = getattr(snake_case , snake_case ).shape else: UpperCAmelCase__ : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": UpperCAmelCase__ : List[Any] = value elif weight_type == "weight_g": UpperCAmelCase__ : str = value elif weight_type == "weight_v": UpperCAmelCase__ : int = value elif weight_type == "bias": UpperCAmelCase__ : Optional[int] = value elif weight_type == "running_mean": UpperCAmelCase__ : Dict = value elif weight_type == "running_var": UpperCAmelCase__ : Tuple = value elif weight_type == "num_batches_tracked": UpperCAmelCase__ : Any = value elif weight_type == "inv_freq": UpperCAmelCase__ : Any = value else: UpperCAmelCase__ : Dict = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Any , snake_case : Optional[int] )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : Tuple = [] UpperCAmelCase__ : Union[str, Any] = fairseq_model.state_dict() UpperCAmelCase__ : Optional[int] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ : Dict = False if "conv_layers" in name: load_conv_layer( snake_case , snake_case , snake_case , snake_case , hf_model.config.feat_extract_norm == "group" , ) UpperCAmelCase__ : List[str] = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase__ : Optional[int] = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: UpperCAmelCase__ : str = True if "*" in mapped_key: UpperCAmelCase__ : str = name.split(snake_case )[0].split("." )[-2] UpperCAmelCase__ : Optional[Any] = mapped_key.replace("*" , snake_case ) if "pos_bias_u" in name: UpperCAmelCase__ : Dict = None elif "pos_bias_v" in name: UpperCAmelCase__ : Any = None elif "weight_g" in name: UpperCAmelCase__ : Optional[Any] = "weight_g" elif "weight_v" in name: UpperCAmelCase__ : Optional[Any] = "weight_v" elif "bias" in name: UpperCAmelCase__ : int = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ : List[Any] = "weight" elif "running_mean" in name: UpperCAmelCase__ : Optional[Any] = "running_mean" elif "inv_freq" in name: UpperCAmelCase__ : Dict = "inv_freq" elif "running_var" in name: UpperCAmelCase__ : List[str] = "running_var" elif "num_batches_tracked" in name: UpperCAmelCase__ : List[Any] = "num_batches_tracked" else: UpperCAmelCase__ : Any = None set_recursively(snake_case , snake_case , snake_case , snake_case , snake_case ) continue if not is_used: unused_weights.append(snake_case ) logger.warning(f'Unused weights: {unused_weights}' ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Any , snake_case : Tuple , snake_case : List[str] )-> Dict: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = full_name.split("conv_layers." )[-1] UpperCAmelCase__ : int = name.split("." ) UpperCAmelCase__ : Tuple = int(items[0] ) UpperCAmelCase__ : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) UpperCAmelCase__ : int = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) UpperCAmelCase__ : Dict = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) UpperCAmelCase__ : Tuple = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) UpperCAmelCase__ : Optional[int] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Union[str, Any]=None , snake_case : Tuple=None , snake_case : str=True )-> str: '''simple docstring''' if config_path is not None: UpperCAmelCase__ : Any = WavaVecaConformerConfig.from_pretrained(snake_case , hidden_act="swish" ) else: UpperCAmelCase__ : List[str] = WavaVecaConformerConfig() if "rope" in checkpoint_path: UpperCAmelCase__ : int = "rotary" if is_finetuned: if dict_path: UpperCAmelCase__ : str = Dictionary.load(snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase__ : Optional[int] = target_dict.pad_index UpperCAmelCase__ : str = target_dict.bos_index UpperCAmelCase__ : Tuple = target_dict.eos_index UpperCAmelCase__ : str = len(target_dict.symbols ) UpperCAmelCase__ : Dict = os.path.join(snake_case , "vocab.json" ) if not os.path.isdir(snake_case ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(snake_case ) ) return os.makedirs(snake_case , exist_ok=snake_case ) UpperCAmelCase__ : Union[str, Any] = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase__ : Tuple = 0 UpperCAmelCase__ : int = 1 with open(snake_case , "w" , encoding="utf-8" ) as vocab_handle: json.dump(snake_case , snake_case ) UpperCAmelCase__ : Tuple = WavaVecaCTCTokenizer( snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=snake_case , ) UpperCAmelCase__ : int = True if config.feat_extract_norm == "layer" else False UpperCAmelCase__ : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case , return_attention_mask=snake_case , ) UpperCAmelCase__ : Any = WavaVecaProcessor(feature_extractor=snake_case , tokenizer=snake_case ) processor.save_pretrained(snake_case ) UpperCAmelCase__ : str = WavaVecaConformerForCTC(snake_case ) else: UpperCAmelCase__ : List[Any] = WavaVecaConformerForPreTraining(snake_case ) if is_finetuned: UpperCAmelCase__ : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( 
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: UpperCAmelCase__ : Optional[Any] = argparse.Namespace(task="audio_pretraining" ) UpperCAmelCase__ : Any = fairseq.tasks.setup_task(snake_case ) UpperCAmelCase__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case ) UpperCAmelCase__ : Union[str, Any] = model[0].eval() recursively_load_weights(snake_case , snake_case , not is_finetuned ) hf_wavavec.save_pretrained(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) _lowerCAmelCase : List[Any] = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
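The dotted-key traversal that set_recursively-style helpers perform reduces to a few lines; a standalone sketch (set_by_path and the shape check are illustrative, not the script's API):

import torch

def set_by_path(module, dotted_key, value, weight_type="weight"):
    # walk e.g. "encoder.layers.0.attn" one attribute at a time
    target = module
    for attr in dotted_key.split("."):
        target = getattr(target, attr)
    param = getattr(target, weight_type)
    assert param.shape == value.shape, "shape mismatch for " + dotted_key
    param.data = value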
355
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =IFPipeline SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __a ( self : Dict ): '''simple docstring''' return self._get_dummy_components() def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): UpperCAmelCase__ : str = torch.manual_seed(snake_case__ ) else: UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase__ : Tuple = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self : Tuple ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self : Tuple ): '''simple docstring''' # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __a ( self : Dict ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __a ( self : int ): '''simple docstring''' self._test_save_load_local() def __a ( self : Any ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self : Optional[Any] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : str ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Tuple ): '''simple docstring''' # if UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : List[Any] = None pipe_a.enable_model_cpu_offload() 
pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : List[Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : str = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Tuple = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : str = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : 
Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Optional[Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : int = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Tuple = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' 
torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
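The memory bookkeeping used by these tests can be wrapped once; a sketch built on the same torch.cuda counters (measure_peak_memory is my name):

import torch

def measure_peak_memory(fn, *args, **kwargs):
    # reset the counters, run the workload, report the high-water mark
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    result = fn(*args, **kwargs)
    peak = torch.cuda.max_memory_allocated()
    print("peak CUDA memory: {:.2f} GiB".format(peak / 2**30))
    return result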
298
0
"""simple docstring""" import math def SCREAMING_SNAKE_CASE__ ( snake_case : float , snake_case : float )-> float: '''simple docstring''' if initial_intensity < 0: raise ValueError("The value of intensity cannot be negative" ) # handling of negative values of initial intensity if angle < 0 or angle > 360: raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(snake_case ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name="""malus_law""")
356
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } _lowerCAmelCase : List[Any] = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } _lowerCAmelCase : int = { """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = set() UpperCAmelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Dict = char UpperCAmelCase__ : Tuple = set(snake_case ) return pairs class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : Dict = vocab_file UpperCAmelCase__ : Tuple = merges_file UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Dict = 3 self.add_from_file(snake_case__ ) UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1] UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges] UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Dict = {} def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] UpperCAmelCase__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def __a ( self : Union[str, Any] , 
snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Tuple = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : List[str] ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : Dict , snake_case__ : Tuple ): '''simple docstring''' if token in self.cache: return self.cache[token] UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) UpperCAmelCase__ : Any = get_pairs(snake_case__ ) if not pairs: return token while True: UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : Tuple = 0 while i < len(snake_case__ ): try: UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : Dict = j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Dict = tuple(snake_case__ ) UpperCAmelCase__ : List[Any] = new_word if len(snake_case__ ) == 1: break else: UpperCAmelCase__ : Dict = get_pairs(snake_case__ ) UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ ) UpperCAmelCase__ : Optional[int] = word[:-4] UpperCAmelCase__ : Union[str, Any] = word return word def __a ( self : List[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : int = re.findall(R"\S+\n?" 
, snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def __a ( self : Dict , snake_case__ : List[str] ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def __a ( self : List[Any] , snake_case__ : Any ): '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def __a ( self : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : Tuple = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : str = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ): copyfile(self.merges_file , snake_case__ ) return out_vocab_file, out_merge_file def __a ( self : List[Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): try: with open(snake_case__ , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' ) return UpperCAmelCase__ : Dict = f.readlines() for lineTmp in lines: UpperCAmelCase__ : Optional[int] = lineTmp.strip() UpperCAmelCase__ : Tuple = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) UpperCAmelCase__ : Any = line[:idx] UpperCAmelCase__ : str = len(self.encoder )
298
0
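Malus's law as coded in the sample above gives the transmitted intensity I = I0 * cos²(θ). A worked check under the same input validation (a stand-alone restatement; the inputs are chosen so the expected results are exact):

import math

def malus_law(initial_intensity: float, angle: float) -> float:
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)

assert math.isclose(malus_law(100.0, 0.0), 100.0)              # aligned polarizers pass everything
assert math.isclose(malus_law(100.0, 60.0), 25.0)              # cos(60°)² = 0.25
assert math.isclose(malus_law(100.0, 90.0), 0.0, abs_tol=1e-9) # crossed polarizers block all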
"""simple docstring""" from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( snake_case : list[int] )-> list[int]: # This function is recursive '''simple docstring''' UpperCAmelCase__ : Tuple = len(snake_case ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else UpperCAmelCase__ : List[Any] = array[0] UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Optional[Any] = 1 UpperCAmelCase__ : list[int] = [] while not is_found and i < array_length: if array[i] < pivot: UpperCAmelCase__ : List[Any] = True UpperCAmelCase__ : List[Any] = [element for element in array[i:] if element >= array[i]] UpperCAmelCase__ : Any = longest_subsequence(snake_case ) if len(snake_case ) > len(snake_case ): UpperCAmelCase__ : Optional[Any] = temp_array else: i += 1 UpperCAmelCase__ : str = [element for element in array[1:] if element >= pivot] UpperCAmelCase__ : List[Any] = [pivot, *longest_subsequence(snake_case )] if len(snake_case ) > len(snake_case ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
357
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 # setable values SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None @classmethod def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ ) @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ =42 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return True @register_to_config def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ): '''simple docstring''' UpperCAmelCase__ : Tuple = dtype def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ): '''simple docstring''' if common is None: UpperCAmelCase__ : Any = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype ) UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , ) def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ): '''simple docstring''' return sample def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=snake_case__ , timesteps=snake_case__ , ) def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: UpperCAmelCase__ : Union[str, 
Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) ) elif variance_type == "fixed_large": UpperCAmelCase__ : List[Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": UpperCAmelCase__ : List[str] = variance UpperCAmelCase__ : Optional[Any] = state.common.betas[t] UpperCAmelCase__ : Any = (predicted_variance + 1) / 2 UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if key is None: UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 ) else: UpperCAmelCase__ : int = None # 1. compute alphas, betas UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) UpperCAmelCase__ : List[str] = 1 - alpha_prod_t UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 ) UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ ) def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __len__( self : Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
298
0
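The recursive pivot-based subsequence search in the sample above is easier to follow with readable names. The sketch below restates the same algorithm one-to-one (same recursion, same filters, nothing added) and checks it on a classic input:

def longest_subsequence(array: list[int]) -> list[int]:
    if len(array) <= 1:
        return array
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < len(array):
        if array[i] < pivot:
            is_found = True
            # drop the pivot and keep only elements that can extend array[i]
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    # alternative branch: keep the pivot and everything after it that is >= pivot
    temp_array = [pivot, *longest_subsequence([element for element in array[1:] if element >= pivot])]
    return temp_array if len(temp_array) > len(longest_subseq) else longest_subseq

result = longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
assert len(result) == 6 and all(a <= b for a, b in zip(result, result[1:]))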
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> list: '''simple docstring''' if bit_count < 0: raise ValueError("The given input must be positive" ) # get the generated string sequence UpperCAmelCase__ : Optional[int] = gray_code_sequence_string(snake_case ) # # convert them to integers for i in range(len(snake_case ) ): UpperCAmelCase__ : str = int(sequence[i] , 2 ) return sequence def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> list: '''simple docstring''' if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] UpperCAmelCase__ : Tuple = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits UpperCAmelCase__ : Tuple = gray_code_sequence_string(bit_count - 1 ) UpperCAmelCase__ : Optional[int] = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): UpperCAmelCase__ : Optional[Any] = "0" + smaller_sequence[i] sequence.append(snake_case ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): UpperCAmelCase__ : str = "1" + smaller_sequence[i] sequence.append(snake_case ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
358
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCAmelCase__ : def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : Union[str, Any] = act_dim UpperCAmelCase__ : Dict = state_dim UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : List[str] = max_length UpperCAmelCase__ : int = is_training def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 ) UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase__ : Optional[int] = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __a ( self : int ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase__ : 
Optional[int] = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =() SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids SCREAMING_SNAKE_CASE_ =False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = DecisionTransformerModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : List[str] ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Tuple = [*signature.parameters.keys()] UpperCAmelCase__ : str = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Optional[int] = model.config torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset() UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ ) UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase__ : Union[str, Any] = state UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Any = torch.zeros(1 , 0 , 
device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 ) for step in range(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model( states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1] UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 ) UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase__ : Tuple = torch.cat( [timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
298
0
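The recursive string construction in the Gray-code sample above yields the standard reflected binary code, in which consecutive codewords differ in exactly one bit. An independent cross-check against the closed form i ^ (i >> 1); the function name here is illustrative:

def gray_code_closed_form(bit_count: int) -> list[int]:
    # the i-th reflected-binary codeword is i XOR (i >> 1)
    return [i ^ (i >> 1) for i in range(1 << bit_count)]

codes = gray_code_closed_form(3)
assert codes == [0, 1, 3, 2, 6, 7, 5, 4]  # same sequence the sample above produces for 3 bits
assert all(bin(a ^ b).count("1") == 1 for a, b in zip(codes, codes[1:]))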
"""simple docstring""" import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Any = """Hello, World!""" _lowerCAmelCase : List[Any] = """en_XX""" def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str , snake_case : bool )-> Tuple: '''simple docstring''' UpperCAmelCase__ : Any = Path("data_bin" ) UpperCAmelCase__ : int = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(snake_case ).parent ) , checkpoint_file=Path(snake_case ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , ) xmod.eval() # disable dropout print(snake_case ) UpperCAmelCase__ : Any = xmod.model.encoder.sentence_encoder UpperCAmelCase__ : List[str] = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , ) if classification_head: UpperCAmelCase__ : List[str] = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our X-MOD config:" , snake_case ) UpperCAmelCase__ : str = XmodForSequenceClassification(snake_case ) if classification_head else XmodForMaskedLM(snake_case ) model.eval() # Now let's copy all the weights. # Embeddings UpperCAmelCase__ : int = xmod_sent_encoder.embed_tokens.weight UpperCAmelCase__ : int = xmod_sent_encoder.embed_positions.weight UpperCAmelCase__ : Any = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. UpperCAmelCase__ : str = xmod_sent_encoder.layernorm_embedding.weight UpperCAmelCase__ : Optional[Any] = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer UpperCAmelCase__ : str = model.roberta.encoder.layer[i] UpperCAmelCase__ : Dict = xmod_sent_encoder.layers[i] # self attention UpperCAmelCase__ : Any = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError("Dimensions of self-attention weights do not match." 
) UpperCAmelCase__ : Optional[int] = xmod_layer.self_attn.q_proj.weight UpperCAmelCase__ : int = xmod_layer.self_attn.q_proj.bias UpperCAmelCase__ : int = xmod_layer.self_attn.k_proj.weight UpperCAmelCase__ : Optional[Any] = xmod_layer.self_attn.k_proj.bias UpperCAmelCase__ : Dict = xmod_layer.self_attn.v_proj.weight UpperCAmelCase__ : Any = xmod_layer.self_attn.v_proj.bias # self-attention output UpperCAmelCase__ : int = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("Dimensions of self-attention output weights do not match." ) UpperCAmelCase__ : Optional[int] = xmod_layer.self_attn.out_proj.weight UpperCAmelCase__ : List[str] = xmod_layer.self_attn.out_proj.bias UpperCAmelCase__ : int = xmod_layer.self_attn_layer_norm.weight UpperCAmelCase__ : Dict = xmod_layer.self_attn_layer_norm.bias # intermediate UpperCAmelCase__ : Tuple = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of intermediate weights do not match." ) UpperCAmelCase__ : str = xmod_layer.fca.weight UpperCAmelCase__ : List[Any] = xmod_layer.fca.bias # output UpperCAmelCase__ : Union[str, Any] = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError("Dimensions of feed-forward weights do not match." ) UpperCAmelCase__ : Optional[int] = xmod_layer.fca.weight UpperCAmelCase__ : str = xmod_layer.fca.bias UpperCAmelCase__ : int = xmod_layer.final_layer_norm.weight UpperCAmelCase__ : Optional[int] = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: UpperCAmelCase__ : List[str] = xmod_layer.adapter_layer_norm.weight UpperCAmelCase__ : Optional[Any] = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError("Lists of language adapters do not match." ) for lang_code, adapter in xmod_layer.adapter_modules.items(): UpperCAmelCase__ : List[str] = bert_output.adapter_modules[lang_code] UpperCAmelCase__ : Optional[Any] = xmod_layer.adapter_modules[lang_code] UpperCAmelCase__ : List[Any] = from_adapter.fca.weight UpperCAmelCase__ : List[str] = from_adapter.fca.bias UpperCAmelCase__ : Optional[int] = from_adapter.fca.weight UpperCAmelCase__ : Tuple = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: UpperCAmelCase__ : Union[str, Any] = xmod_sent_encoder.layer_norm.weight UpperCAmelCase__ : Tuple = xmod_sent_encoder.layer_norm.bias if classification_head: UpperCAmelCase__ : Union[str, Any] = xmod.model.classification_heads["mnli"].dense.weight UpperCAmelCase__ : int = xmod.model.classification_heads["mnli"].dense.bias UpperCAmelCase__ : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight UpperCAmelCase__ : str = xmod.model.classification_heads["mnli"].out_proj.bias else: # LM Head UpperCAmelCase__ : str = xmod.model.encoder.lm_head.dense.weight UpperCAmelCase__ : Optional[int] = xmod.model.encoder.lm_head.dense.bias UpperCAmelCase__ : List[Any] = xmod.model.encoder.lm_head.layer_norm.weight UpperCAmelCase__ : int = xmod.model.encoder.lm_head.layer_norm.bias UpperCAmelCase__ : List[str] = xmod.model.encoder.lm_head.weight UpperCAmelCase__ : Any = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. 
UpperCAmelCase__ : Union[str, Any] = xmod.encode(snake_case ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(snake_case ) UpperCAmelCase__ : Union[str, Any] = model(snake_case )[0] if classification_head: UpperCAmelCase__ : Dict = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case ) ) else: UpperCAmelCase__ : str = xmod.model(snake_case , lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape , their_output.shape ) UpperCAmelCase__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7 UpperCAmelCase__ : int = torch.allclose(snake_case , snake_case , atol=1E-3 ) print("Do both models output the same tensors?" , "🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) Path(snake_case ).mkdir(parents=snake_case , exist_ok=snake_case ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) _lowerCAmelCase : Tuple = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
359
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
298
0
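The X-MOD conversion script above validates the port numerically: run both models on the same input, report the max absolute difference, then assert closeness. That check distilled into a stand-alone helper; the tensors below are placeholders, not real model outputs:

import torch

def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    # report the worst elementwise deviation, then apply the tolerance check
    max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")
    return torch.allclose(ours, theirs, atol=atol)

ours = torch.randn(1, 4, 8)
theirs = ours + 1e-5  # a successfully converted model differs only by float noise
assert outputs_match(ours, theirs)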
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str]=None )-> Any: '''simple docstring''' assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match' UpperCAmelCase__ : Dict = nn.Parameter(snake_case ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match' UpperCAmelCase__ : Union[str, Any] = nn.Parameter(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple , snake_case : int )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : str = np.asarray(weights[0] ) UpperCAmelCase__ : int = np.asarray(weights[1] ) UpperCAmelCase__ : List[str] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(snake_case ).transpose(1 , 2 ).contiguous().view(-1 , snake_case ) , ) set_param( torch_layer.self_attention.value , torch.tensor(snake_case ).transpose(1 , 2 ).contiguous().view(-1 , snake_case ) , ) set_param( torch_layer.output.dense , torch.tensor(snake_case ).view(-1 , snake_case ).contiguous().transpose(0 , 1 ) , ) def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int , snake_case : List[Any] )-> Any: '''simple docstring''' UpperCAmelCase__ : Any = np.asarray(weights[0] ) UpperCAmelCase__ : Any = np.asarray(weights[1] ) UpperCAmelCase__ : Union[str, Any] = np.asarray(weights[2] ) UpperCAmelCase__ : Union[str, Any] = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(snake_case ).transpose(1 , 2 ).contiguous().view(-1 , snake_case ) , ) set_param( torch_layer.self_attention.key , torch.tensor(snake_case ).transpose(1 , 2 ).contiguous().view(-1 , snake_case ) , ) set_param( torch_layer.self_attention.value , torch.tensor(snake_case ).transpose(1 , 2 ).contiguous().view(-1 , snake_case ) , ) set_param( torch_layer.output.dense , torch.tensor(snake_case ).view(-1 , snake_case ).contiguous().transpose(0 , 1 ) , ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any , snake_case : int )-> Any: '''simple docstring''' UpperCAmelCase__ : List[Any] = weights[0][0][0] UpperCAmelCase__ : Optional[Any] = np.asarray(layer_norm_a[0] ) UpperCAmelCase__ : Optional[int] = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(snake_case ) , torch.tensor(snake_case ) , ) # lsh weights + output UpperCAmelCase__ : List[Any] = weights[0][1] if len(snake_case ) < 4: set_layer_weights_in_torch_lsh(snake_case , torch_block.attention , snake_case ) else: set_layer_weights_in_torch_local(snake_case , torch_block.attention , snake_case ) # intermediate weighs UpperCAmelCase__ : Any = weights[2][0][1][2] # Chunked Feed Forward if len(snake_case ) == 4: UpperCAmelCase__ : int = intermediate_weights[2] # layernorm 2 UpperCAmelCase__ : Optional[int] = np.asarray(intermediate_weights[0][0] ) UpperCAmelCase__ : Optional[int] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(snake_case ) , torch.tensor(snake_case ) , ) # intermediate dense UpperCAmelCase__ : List[Any] = np.asarray(intermediate_weights[1][0] ) UpperCAmelCase__ : Union[str, Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , 
torch.tensor(snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case ) , ) # intermediate out UpperCAmelCase__ : Any = np.asarray(intermediate_weights[4][0] ) UpperCAmelCase__ : Optional[int] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case ) , ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str , snake_case : str )-> List[str]: '''simple docstring''' UpperCAmelCase__ : str = torch_model.reformer # word embeds UpperCAmelCase__ : int = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(snake_case ) , ) if isinstance(weights[3] , snake_case ): UpperCAmelCase__ : Optional[int] = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): UpperCAmelCase__ : Tuple = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f'{position_embeddings[emb_idx]} emb does not match' UpperCAmelCase__ : List[str] = nn.Parameter(torch.tensor(snake_case ) ) UpperCAmelCase__ : List[str] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( snake_case ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): UpperCAmelCase__ : Optional[int] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(snake_case , snake_case , snake_case ) # output layer norm UpperCAmelCase__ : List[str] = np.asarray(weights[7][0] ) UpperCAmelCase__ : Optional[Any] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(snake_case ) , torch.tensor(snake_case ) , ) # output embeddings UpperCAmelCase__ : Optional[Any] = np.asarray(weights[9][0] ) UpperCAmelCase__ : int = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case ) , ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : List[Any] , snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : int = ReformerConfig.from_json_file(snake_case ) print(f'Building PyTorch model from configuration: {config}' ) UpperCAmelCase__ : Tuple = ReformerModelWithLMHead(snake_case ) with open(snake_case , "rb" ) as f: UpperCAmelCase__ : Dict = pickle.load(snake_case )["weights"] set_model_weights_in_torch(snake_case , snake_case , config.hidden_size ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained Reformer model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCAmelCase : List[str] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
360
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
298
0
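The hub utility in the sample above splices a variant tag in front of the weight file's extension before resolving it on the Hub (e.g. "fp16" turns "...model.bin" into "...model.fp16.bin"). Restated as a stand-alone helper with a usage check:

from typing import Optional

def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is None:
        return weights_name
    splits = weights_name.split(".")
    # insert the variant just before the file extension
    return ".".join(splits[:-1] + [variant] + splits[-1:])

assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant("model.safetensors") == "model.safetensors"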
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 # setable values SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None @classmethod def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ ) @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ =42 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return True @register_to_config def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ): '''simple docstring''' UpperCAmelCase__ : Tuple = dtype def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ): '''simple docstring''' if common is None: UpperCAmelCase__ : Any = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype ) UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , ) def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ): '''simple docstring''' return sample def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=snake_case__ , timesteps=snake_case__ , ) def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: UpperCAmelCase__ : Union[str, Any] = self.config.variance_type # hacks - were probably added for training stability 
if variance_type == "fixed_small": UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) ) elif variance_type == "fixed_large": UpperCAmelCase__ : List[Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": UpperCAmelCase__ : List[str] = variance UpperCAmelCase__ : Optional[Any] = state.common.betas[t] UpperCAmelCase__ : Any = (predicted_variance + 1) / 2 UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if key is None: UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 ) else: UpperCAmelCase__ : int = None # 1. compute alphas, betas UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) UpperCAmelCase__ : List[str] = 1 - alpha_prod_t UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 ) UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ ) def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __len__( self : Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
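For reference, a minimal standalone sketch (not from the source; names are illustrative) of the reverse-step mean that the step method above assembles for the "epsilon" prediction type, i.e. formula (7) of https://arxiv.org/pdf/2006.11239.pdf:

import jax.numpy as jnp

def ddpm_posterior_mean(sample, model_output, alphas, alphas_cumprod, betas, t):
    # Recover "predicted x_0" from the noisy sample under epsilon prediction,
    # clip it, then mix it with x_t using the two coefficients of formula (7).
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = jnp.where(t > 0, alphas_cumprod[t - 1], 1.0)
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    pred_x0 = jnp.clip((sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5, -1, 1)
    coeff_x0 = (alpha_prod_t_prev**0.5 * betas[t]) / beta_prod_t
    coeff_xt = alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    return coeff_x0 * pred_x0 + coeff_xt * sample

betas = jnp.linspace(1e-4, 2e-2, 10)
alphas = 1.0 - betas
alphas_cumprod = jnp.cumprod(alphas)
mean = ddpm_posterior_mean(jnp.ones(4), jnp.zeros(4), alphas, alphas_cumprod, betas, t=5)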
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy() UpperCAmelCase__ : List[Any] = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
"""simple docstring""" import math import tensorflow as tf from packaging import version def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple )-> int: '''simple docstring''' UpperCAmelCase__ : Any = tf.convert_to_tensor(snake_case ) UpperCAmelCase__ : Optional[Any] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> List[Any]: '''simple docstring''' UpperCAmelCase__ : Any = tf.convert_to_tensor(snake_case ) UpperCAmelCase__ : Optional[int] = tf.cast(math.pi , x.dtype ) UpperCAmelCase__ : List[str] = tf.cast(0.04_4715 , x.dtype ) UpperCAmelCase__ : int = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(snake_case , 3 )) )) return x * cdf def SCREAMING_SNAKE_CASE__ ( snake_case : Dict )-> Any: '''simple docstring''' UpperCAmelCase__ : Optional[int] = tf.convert_to_tensor(snake_case ) return x * tf.tanh(tf.math.softplus(snake_case ) ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> List[Any]: '''simple docstring''' UpperCAmelCase__ : Any = tf.convert_to_tensor(snake_case ) UpperCAmelCase__ : Tuple = tf.cast(0.04_4715 , x.dtype ) UpperCAmelCase__ : Dict = tf.cast(0.79_7884_5608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def SCREAMING_SNAKE_CASE__ ( snake_case : Dict )-> List[str]: '''simple docstring''' UpperCAmelCase__ : Optional[int] = tf.convert_to_tensor(snake_case ) UpperCAmelCase__ : List[str] = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str: '''simple docstring''' return tf.clip_by_value(_gelu(snake_case ) , -10 , 10 ) def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : List[Any]=-1 )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = tf.split(snake_case , 2 , axis=snake_case ) return a * tf.math.sigmoid(snake_case ) if version.parse(tf.version.VERSION) >= version.parse("""2.4"""): def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> int: '''simple docstring''' return tf.keras.activations.gelu(snake_case , approximate=snake_case ) _lowerCAmelCase : List[str] = tf.keras.activations.gelu _lowerCAmelCase : int = approximate_gelu_wrap else: _lowerCAmelCase : Union[str, Any] = _gelu _lowerCAmelCase : Optional[Any] = _gelu_new _lowerCAmelCase : Union[str, Any] = { """gelu""": gelu, """gelu_10""": gelu_aa, """gelu_fast""": gelu_fast, """gelu_new""": gelu_new, """glu""": glu, """mish""": mish, """quick_gelu""": quick_gelu, """relu""": tf.keras.activations.relu, """sigmoid""": tf.keras.activations.sigmoid, """silu""": tf.keras.activations.swish, """swish""": tf.keras.activations.swish, """tanh""": tf.keras.activations.tanh, } def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> Dict: '''simple docstring''' if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : int = seq_length UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : Union[str, Any] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Any = embedding_size UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : List[Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : Union[str, Any] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Tuple = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : Union[str, Any] = scope def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Optional[int] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : int = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = AlbertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self 
: Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_labels UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = self.num_labels UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_choices UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Tuple = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =True def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ): '''simple docstring''' UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , 
snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Dict = type self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" ) UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case__ ) UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
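The multiple-choice check above leans on one non-obvious tensor trick: replicating each example across the choice dimension before the forward pass. Isolated with toy shapes (illustration only):

import torch

batch_size, seq_length, num_choices = 2, 5, 4
input_ids = torch.randint(0, 100, (batch_size, seq_length))
# (batch, seq) -> (batch, num_choices, seq): every choice sees the same tokens.
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch_size, num_choices, seq_length)
assert torch.equal(expanded[:, 0], expanded[:, 1])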
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : list )-> list: '''simple docstring''' def merge(snake_case : list , snake_case : list ) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0 ) yield from left yield from right return list(_merge() ) if len(snake_case ) <= 1: return collection UpperCAmelCase__ : str = len(snake_case ) // 2 return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) ) if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip() _lowerCAmelCase : Dict = [int(item) for item in user_input.split(""",""")] print(*merge_sort(unsorted), sep=""",""")
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = list(range(snake_case ) ) # Find permutation while factorials: UpperCAmelCase__ : str = factorials.pop() UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import pprint import requests _lowerCAmelCase : List[str] = """https://zenquotes.io/api""" def SCREAMING_SNAKE_CASE__ ( )-> list: '''simple docstring''' return requests.get(API_ENDPOINT_URL + "/today" ).json() def SCREAMING_SNAKE_CASE__ ( )-> list: '''simple docstring''' return requests.get(API_ENDPOINT_URL + "/random" ).json() if __name__ == "__main__": _lowerCAmelCase : Optional[int] = random_quotes() pprint.pprint(response)
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _lowerCAmelCase : Union[str, Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0} UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Any = image_size UpperCAmelCase__ : int = min_resolution UpperCAmelCase__ : Tuple = max_resolution UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : Optional[int] = do_normalize UpperCAmelCase__ : str = do_convert_rgb UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def __a ( self : str ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = PixaStructImageProcessingTester(self ) @property def __a ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ : Dict = 2_0_4_8 UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : 
Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase__ : Optional[int] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(snake_case__ ): UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches UpperCAmelCase__ : Optional[Any] = "Hello" UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Dict ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Dict = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : List[str] = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Optional[int] ): 
'''simple docstring''' # Initialize image_processor UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase__ : Optional[int] = 3 @property def __a ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : int ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
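The expected_hidden_dim asserted throughout these tests, spelled out: every flattened patch carries patch_height * patch_width * channels pixel values plus two extra columns for the patch's row and column index:

patch_height = patch_width = 16
num_channels = 3
expected_hidden_dim = patch_height * patch_width * num_channels + 2
assert expected_hidden_dim == 770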
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCAmelCase : Tuple = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { """shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""", # See all Dinat models at https://huggingface.co/models?filter=dinat } class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''dinat''' SCREAMING_SNAKE_CASE_ ={ '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Any , snake_case__ : List[Any]=4 , snake_case__ : Optional[Any]=3 , snake_case__ : Tuple=6_4 , snake_case__ : str=[3, 4, 6, 5] , snake_case__ : Optional[Any]=[2, 4, 8, 1_6] , snake_case__ : Any=7 , snake_case__ : Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , snake_case__ : str=3.0 , snake_case__ : Dict=True , snake_case__ : Optional[Any]=0.0 , snake_case__ : str=0.0 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]="gelu" , snake_case__ : Optional[Any]=0.02 , snake_case__ : Optional[int]=1e-5 , snake_case__ : Optional[int]=0.0 , snake_case__ : Union[str, Any]=None , snake_case__ : int=None , **snake_case__ : List[str] , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = patch_size UpperCAmelCase__ : str = num_channels UpperCAmelCase__ : List[str] = embed_dim UpperCAmelCase__ : Any = depths UpperCAmelCase__ : int = len(snake_case__ ) UpperCAmelCase__ : List[Any] = num_heads UpperCAmelCase__ : List[str] = kernel_size UpperCAmelCase__ : Any = dilations UpperCAmelCase__ : List[Any] = mlp_ratio UpperCAmelCase__ : List[Any] = qkv_bias UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Any = attention_probs_dropout_prob UpperCAmelCase__ : Union[str, Any] = drop_path_rate UpperCAmelCase__ : str = hidden_act UpperCAmelCase__ : Optional[Any] = layer_norm_eps UpperCAmelCase__ : Optional[Any] = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCAmelCase__ : Optional[int] = int(embed_dim * 2 ** (len(snake_case__ ) - 1) ) UpperCAmelCase__ : Optional[int] = layer_scale_init_value UpperCAmelCase__ : List[Any] = ["stem"] + [f'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )] UpperCAmelCase__ : Optional[Any] = get_aligned_output_features_output_indices( out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : int = "mock-s3-bucket" UpperCAmelCase__ : Any = f's3://{mock_bucket}' UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case ) assert dataset_path.startswith("s3://" ) is False UpperCAmelCase__ : str = "./local/path" UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is True UpperCAmelCase__ : str = fsspec.filesystem("file" ) UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol] if input_path is None: UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case ) UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." 
)] assert fs.glob("*" ) == [expected_filename] with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} UpperCAmelCase__ : int = compressed_file_paths[protocol] UpperCAmelCase__ : Any = "dataset.jsonl" UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}' UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case ) UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(snake_case ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
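A hedged sketch of the chained-URL syntax these tests exercise: protocol://member::container lets fsspec open a member file straight out of an archive. The paths below are placeholders, so the snippet only runs against a real zip file:

import fsspec

with fsspec.open("zip://dataset.jsonl::archive.zip", "rt") as f:  # placeholder paths
    first_line = f.readline()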
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() _lowerCAmelCase : str = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = """The Nymphenburg Palace is a beautiful palace in Munich!""" def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> List[str]: UpperCAmelCase__ : Optional[int] = { "attention_cell": "multi_head", "num_layers": 4, "units": 1024, "hidden_size": 768, "max_length": 512, "num_heads": 8, "scaled": True, "dropout": 0.1, "use_residual": True, "embed_size": 1024, "embed_dropout": 0.1, "word_embed": None, "layer_norm_eps": 1E-5, "token_type_vocab_size": 2, } UpperCAmelCase__ : Tuple = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py UpperCAmelCase__ : List[str] = BERTEncoder( attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=snake_case , output_all_encodings=snake_case , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , snake_case ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later UpperCAmelCase__ : Any = "openwebtext_ccnews_stories_books_cased" # Specify download folder to Gluonnlp's vocab UpperCAmelCase__ : Tuple = os.path.join(get_home_dir() , "models" ) UpperCAmelCase__ : Union[str, Any] = _load_vocab(snake_case , snake_case , snake_case , cls=snake_case ) UpperCAmelCase__ : Union[str, Any] = nlp.model.BERTModel( snake_case , len(snake_case ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=snake_case , use_token_type_embed=snake_case , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=snake_case , use_decoder=snake_case , ) original_bort.load_parameters(snake_case , cast_dtype=snake_case , ignore_extra=snake_case ) UpperCAmelCase__ : Optional[int] = original_bort._collect_params_with_prefix() # Build our config 🤗 UpperCAmelCase__ : Dict = { "architectures": ["BertForMaskedLM"], "attention_probs_dropout_prob": predefined_args["dropout"], "hidden_act": "gelu", "hidden_dropout_prob": predefined_args["dropout"], "hidden_size": predefined_args["embed_size"], "initializer_range": 0.02, "intermediate_size": 
predefined_args["hidden_size"], "layer_norm_eps": predefined_args["layer_norm_eps"], "max_position_embeddings": predefined_args["max_length"], "model_type": "bort", "num_attention_heads": predefined_args["num_heads"], "num_hidden_layers": predefined_args["num_layers"], "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa "vocab_size": len(snake_case ), } UpperCAmelCase__ : Any = BertConfig.from_dict(snake_case ) UpperCAmelCase__ : Any = BertForMaskedLM(snake_case ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(snake_case : Dict ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(snake_case : Optional[int] , snake_case : Tuple ): UpperCAmelCase__ : Dict = hf_param.shape UpperCAmelCase__ : Optional[int] = to_torch(params[gluon_param] ) UpperCAmelCase__ : Optional[int] = gluon_param.shape assert ( shape_hf == shape_gluon ), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param UpperCAmelCase__ : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" ) UpperCAmelCase__ : Dict = check_and_map_params( 
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" ) UpperCAmelCase__ : Dict = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" ) UpperCAmelCase__ : Tuple = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) UpperCAmelCase__ : Dict = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): UpperCAmelCase__ : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention UpperCAmelCase__ : BertSelfAttention = layer.attention.self UpperCAmelCase__ : Optional[int] = check_and_map_params( self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) UpperCAmelCase__ : List[str] = check_and_map_params( self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) UpperCAmelCase__ : int = check_and_map_params( self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) UpperCAmelCase__ : Any = check_and_map_params( self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) UpperCAmelCase__ : Dict = check_and_map_params( self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) UpperCAmelCase__ : int = check_and_map_params( self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output UpperCAmelCase__ : BertSelfOutput = layer.attention.output UpperCAmelCase__ : Union[str, Any] = check_and_map_params( self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' ) UpperCAmelCase__ : Optional[int] = check_and_map_params( self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' ) UpperCAmelCase__ : Union[str, Any] = check_and_map_params( self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' ) UpperCAmelCase__ : List[Any] = check_and_map_params( self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate UpperCAmelCase__ : BertIntermediate = layer.intermediate UpperCAmelCase__ : str = check_and_map_params( intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) UpperCAmelCase__ : str = check_and_map_params( intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output UpperCAmelCase__ : BertOutput = layer.output UpperCAmelCase__ : Dict = check_and_map_params( bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) UpperCAmelCase__ : str = check_and_map_params( bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) UpperCAmelCase__ : Optional[Any] = check_and_map_params( bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) UpperCAmelCase__ : List[str] = check_and_map_params( bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models UpperCAmelCase__ : Optional[Any] = RobertaTokenizer.from_pretrained("roberta-base" ) UpperCAmelCase__ : Tuple = tokenizer.encode_plus(snake_case )["input_ids"] # Get gluon output UpperCAmelCase__ : str = mx.nd.array([input_ids] ) UpperCAmelCase__ : Dict = original_bort(inputs=snake_case , token_types=[] ) # Get Transformer output (save and reload model 
again) hf_bort_model.save_pretrained(snake_case ) UpperCAmelCase__ : str = BertModel.from_pretrained(snake_case ) hf_bort_model.eval() UpperCAmelCase__ : List[str] = tokenizer.encode_plus(snake_case , return_tensors="pt" ) UpperCAmelCase__ : int = hf_bort_model(**snake_case )[0] UpperCAmelCase__ : Optional[int] = output_gluon[0].asnumpy() UpperCAmelCase__ : Tuple = output_hf[0].detach().numpy() UpperCAmelCase__ : List[str] = np.max(np.abs(hf_layer - gluon_layer ) ).item() UpperCAmelCase__ : List[Any] = np.allclose(snake_case , snake_case , atol=1E-3 ) if success: print("✔️ Both model do output the same tensors" ) else: print("❌ Both model do **NOT** output the same tensors" ) print("Absolute difference is:" , snake_case ) if __name__ == "__main__": _lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
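The conversion's core invariant, isolated as a sketch (stand-in shapes; the real parameter names come from the mapping table above): a Gluon array may only replace a Transformers parameter of identical shape, never a silently broadcast one:

import numpy as np
import torch
from torch import nn

def map_param(hf_param: nn.Parameter, mx_array: np.ndarray) -> nn.Parameter:
    # Mirrors check_and_map_params above, minus the Gluon-specific unwrapping.
    assert tuple(hf_param.shape) == tuple(mx_array.shape), "shape mismatch"
    return nn.Parameter(torch.from_numpy(mx_array.copy()).float())

_ = map_param(nn.Parameter(torch.zeros(2, 3)), np.ones((2, 3), dtype=np.float32))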
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''megatron-bert''' def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Tuple = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Any = max_position_embeddings UpperCAmelCase__ : Dict = type_vocab_size UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : int = layer_norm_eps UpperCAmelCase__ : Optional[Any] = position_embedding_type UpperCAmelCase__ : Any = use_cache
"""simple docstring""" import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] , snake_case : Dict )-> int: '''simple docstring''' return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :] def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : str , snake_case : List[Any] , snake_case : str="attention" )-> Dict: '''simple docstring''' UpperCAmelCase__ : Dict = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] ) UpperCAmelCase__ : str = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) UpperCAmelCase__ : Dict = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] ) UpperCAmelCase__ : Union[str, Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) UpperCAmelCase__ : Dict = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] ) UpperCAmelCase__ : List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) UpperCAmelCase__ : Union[str, Any] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] ) UpperCAmelCase__ : Optional[int] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : List[str] , snake_case : Dict , snake_case : Union[str, Any]=False )-> Optional[int]: '''simple docstring''' if split_mlp_wi: UpperCAmelCase__ : List[str] = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :] UpperCAmelCase__ : Optional[int] = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :] UpperCAmelCase__ : List[str] = (wi_a, wi_a) else: UpperCAmelCase__ : List[Any] = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :] UpperCAmelCase__ : Optional[Any] = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :] return wi, wo def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Any , snake_case : Optional[Any] )-> Optional[int]: '''simple docstring''' return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i] def SCREAMING_SNAKE_CASE__ ( snake_case : dict , *, snake_case : int , snake_case : bool , snake_case : bool = False )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = traverse_util.flatten_dict(variables["target"] ) UpperCAmelCase__ : Tuple = {"/".join(snake_case ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi UpperCAmelCase__ : int = "encoder/encoder/mlp/wi_0/kernel" in old print("Split MLP:" , snake_case ) UpperCAmelCase__ : List[str] = collections.OrderedDict() # Shared embeddings. UpperCAmelCase__ : Tuple = old["token_embedder/embedding"] # Encoder. for i in range(snake_case ): # Block i, layer 0 (Self Attention). UpperCAmelCase__ : Optional[Any] = tax_layer_norm_lookup(snake_case , snake_case , "encoder" , "pre_attention_layer_norm" ) UpperCAmelCase__ : int = tax_attention_lookup(snake_case , snake_case , "encoder" , "attention" ) UpperCAmelCase__ : int = layer_norm UpperCAmelCase__ : Tuple = k.T UpperCAmelCase__ : str = o.T UpperCAmelCase__ : List[str] = q.T UpperCAmelCase__ : str = v.T # Block i, layer 1 (MLP). 
UpperCAmelCase__ : Optional[Any] = tax_layer_norm_lookup(snake_case , snake_case , "encoder" , "pre_mlp_layer_norm" ) UpperCAmelCase__ : Dict = tax_mlp_lookup(snake_case , snake_case , "encoder" , snake_case ) UpperCAmelCase__ : Any = layer_norm if split_mlp_wi: UpperCAmelCase__ : List[Any] = wi[0].T UpperCAmelCase__ : Optional[Any] = wi[1].T else: UpperCAmelCase__ : Dict = wi.T UpperCAmelCase__ : Union[str, Any] = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCAmelCase__ : Tuple = tax_relpos_bias_lookup( snake_case , snake_case , "encoder" ).T UpperCAmelCase__ : Any = old["encoder/encoder_norm/scale"] if not scalable_attention: UpperCAmelCase__ : List[str] = tax_relpos_bias_lookup( snake_case , 0 , "encoder" ).T UpperCAmelCase__ : int = tax_relpos_bias_lookup( snake_case , 0 , "decoder" ).T if not is_encoder_only: # Decoder. for i in range(snake_case ): # Block i, layer 0 (Self Attention). UpperCAmelCase__ : List[str] = tax_layer_norm_lookup(snake_case , snake_case , "decoder" , "pre_self_attention_layer_norm" ) UpperCAmelCase__ : Tuple = tax_attention_lookup(snake_case , snake_case , "decoder" , "self_attention" ) UpperCAmelCase__ : Tuple = layer_norm UpperCAmelCase__ : List[str] = k.T UpperCAmelCase__ : List[Any] = o.T UpperCAmelCase__ : Optional[int] = q.T UpperCAmelCase__ : int = v.T # Block i, layer 1 (Cross Attention). UpperCAmelCase__ : List[Any] = tax_layer_norm_lookup(snake_case , snake_case , "decoder" , "pre_cross_attention_layer_norm" ) UpperCAmelCase__ : Union[str, Any] = tax_attention_lookup(snake_case , snake_case , "decoder" , "encoder_decoder_attention" ) UpperCAmelCase__ : Union[str, Any] = layer_norm UpperCAmelCase__ : str = k.T UpperCAmelCase__ : Any = o.T UpperCAmelCase__ : Optional[Any] = q.T UpperCAmelCase__ : List[str] = v.T # Block i, layer 2 (MLP). UpperCAmelCase__ : Optional[int] = tax_layer_norm_lookup(snake_case , snake_case , "decoder" , "pre_mlp_layer_norm" ) UpperCAmelCase__ : Optional[Any] = tax_mlp_lookup(snake_case , snake_case , "decoder" , snake_case ) UpperCAmelCase__ : Dict = layer_norm if split_mlp_wi: UpperCAmelCase__ : Optional[Any] = wi[0].T UpperCAmelCase__ : Optional[Any] = wi[1].T else: UpperCAmelCase__ : Dict = wi.T UpperCAmelCase__ : Dict = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCAmelCase__ : List[str] = tax_relpos_bias_lookup(snake_case , snake_case , "decoder" ).T UpperCAmelCase__ : Optional[Any] = old["decoder/decoder_norm/scale"] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: UpperCAmelCase__ : Union[str, Any] = old["decoder/logits_dense/kernel"].T return new def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : bool )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: UpperCAmelCase__ : Union[str, Any] = state_dict["shared.weight"] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: UpperCAmelCase__ : Any = state_dict["shared.weight"] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("Using shared word embeddings as lm_head." 
) UpperCAmelCase__ : Dict = state_dict["shared.weight"] return state_dict def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : List[str] , snake_case : List[Any] , snake_case : str , snake_case : Dict )-> Any: '''simple docstring''' UpperCAmelCase__ : Dict = checkpoints.load_tax_checkpoint(snake_case ) UpperCAmelCase__ : List[str] = convert_tax_to_pytorch( snake_case , num_layers=config.num_layers , is_encoder_only=snake_case , scalable_attention=snake_case ) UpperCAmelCase__ : Union[str, Any] = make_state_dict(snake_case , snake_case ) model.load_state_dict(snake_case , strict=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : str , snake_case : Optional[Any] , snake_case : bool = False , snake_case : bool = False , )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : int = MTaConfig.from_json_file(snake_case ) print(f'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: UpperCAmelCase__ : str = UMTaEncoderModel(snake_case ) else: UpperCAmelCase__ : str = UMTaForConditionalGeneration(snake_case ) # Load weights from tf checkpoint load_tax_weights_in_ta(snake_case , snake_case , snake_case , snake_case , snake_case ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(snake_case ) # Verify that we can load the checkpoint. model.from_pretrained(snake_case ) print("Done" ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""") # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False ) parser.add_argument( """--scalable_attention""", action="""store_true""", help="""Whether the model uses scaled attention (umt5 model)""", default=False, ) _lowerCAmelCase : int = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
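For intuition, the kernel reshapes performed by the attention lookup in the script above fold the head axis into one matrix dimension. A minimal stand-alone sketch (the shapes d_model=4, n_heads=2, d_head=3 are made up for illustration):

import numpy as np

# T5X stores per-layer attention kernels head-split; PyTorch Linear weights are 2-D,
# so the head and head-dim axes are merged before transposing into the state dict.
k_tmp = np.zeros((4, 2, 3))                                          # (d_model, n_heads, d_head)
k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])   # -> (4, 6)
o_tmp = np.zeros((2, 3, 4))                                          # (n_heads, d_head, d_model)
o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])   # -> (6, 4)
print(k.shape, o.shape)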
367
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Dict ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , ) def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )] def __a ( self : Any , snake_case__ : str , snake_case__ : str ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Any ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} ) ] def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )] def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )] class lowerCAmelCase__ ( __magic_name__ ): @require_beam def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : Dict ): '''simple docstring''' import apache_beam as beam UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock: UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , 
"default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Dict = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset
298
0
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): @register_to_config def __init__( self : Any , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : Tuple[str] = ("DownEncoderBlock2D",) , snake_case__ : Tuple[str] = ("UpDecoderBlock2D",) , snake_case__ : Tuple[int] = (6_4,) , snake_case__ : int = 1 , snake_case__ : str = "silu" , snake_case__ : int = 3 , snake_case__ : int = 3_2 , snake_case__ : int = 2_5_6 , snake_case__ : int = 3_2 , snake_case__ : Optional[int] = None , snake_case__ : float = 0.1_8215 , snake_case__ : str = "group" , ): '''simple docstring''' super().__init__() # pass init params to Encoder UpperCAmelCase__ : Any = Encoder( in_channels=snake_case__ , out_channels=snake_case__ , down_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , act_fn=snake_case__ , norm_num_groups=snake_case__ , double_z=snake_case__ , ) UpperCAmelCase__ : Union[str, Any] = vq_embed_dim if vq_embed_dim is not None else latent_channels UpperCAmelCase__ : Tuple = nn.Convad(snake_case__ , snake_case__ , 1 ) UpperCAmelCase__ : List[Any] = VectorQuantizer(snake_case__ , snake_case__ , beta=0.25 , remap=snake_case__ , sane_index_shape=snake_case__ ) UpperCAmelCase__ : Any = nn.Convad(snake_case__ , snake_case__ , 1 ) # pass init params to Decoder UpperCAmelCase__ : Any = Decoder( in_channels=snake_case__ , out_channels=snake_case__ , up_block_types=snake_case__ , block_out_channels=snake_case__ , layers_per_block=snake_case__ , act_fn=snake_case__ , norm_num_groups=snake_case__ , norm_type=snake_case__ , ) @apply_forward_hook def __a ( self : int , snake_case__ : torch.FloatTensor , snake_case__ : bool = True ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.encoder(snake_case__ ) UpperCAmelCase__ : List[str] = self.quant_conv(snake_case__ ) if not return_dict: return (h,) return VQEncoderOutput(latents=snake_case__ ) @apply_forward_hook def __a ( self : Optional[Any] , snake_case__ : torch.FloatTensor , snake_case__ : bool = False , snake_case__ : bool = True ): '''simple docstring''' # also go through quantization layer if not force_not_quantize: UpperCAmelCase__ : Any = self.quantize(snake_case__ ) else: UpperCAmelCase__ : Union[str, Any] = h UpperCAmelCase__ : Any = self.post_quant_conv(snake_case__ ) UpperCAmelCase__ : int = self.decoder(snake_case__ , quant if self.config.norm_type == "spatial" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=snake_case__ ) def __a ( self : Tuple , snake_case__ : torch.FloatTensor , snake_case__ : bool = True ): '''simple docstring''' UpperCAmelCase__ : Tuple = sample UpperCAmelCase__ : List[Any] = self.encode(snake_case__ ).latents UpperCAmelCase__ : Optional[Any] = self.decode(snake_case__ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=snake_case__ )
368
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =XLMTokenizer SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : Optional[int] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(snake_case__ ) ) def __a ( self : Union[str, Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = "lower newer" UpperCAmelCase__ : Optional[Any] = "lower newer" return input_text, output_text def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : List[Any] = "lower" UpperCAmelCase__ : Any = ["low", "er</w>"] UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"] UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
298
0
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig

_lowerCAmelCase : List[Any] = logging.get_logger(__name__)

_lowerCAmelCase : Union[str, Any] = """T5Config"""

class lowerCAmelCase__ ( __magic_name__ ):
    SCREAMING_SNAKE_CASE_ = '''mt5'''
    SCREAMING_SNAKE_CASE_ = MTaConfig

class lowerCAmelCase__ ( __magic_name__ ):
    SCREAMING_SNAKE_CASE_ = '''mt5'''
    SCREAMING_SNAKE_CASE_ = MTaConfig

class lowerCAmelCase__ ( __magic_name__ ):
    SCREAMING_SNAKE_CASE_ = '''mt5'''
    SCREAMING_SNAKE_CASE_ = MTaConfig
369
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ): '''simple docstring''' UpperCAmelCase__ : Any = "bilinear" UpperCAmelCase__ : Any = max_size UpperCAmelCase__ : Any = short_edge_length def __call__( self : Dict , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for img in imgs: UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ ) if h < w: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w else: UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size if max(snake_case__ , snake_case__ ) > self.max_size: UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = newh * scale UpperCAmelCase__ : int = neww * scale UpperCAmelCase__ : List[Any] = int(neww + 0.5 ) UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase__ : Any = Image.fromarray(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ ) else: UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase__ : Tuple = nn.functional.interpolate( snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 ) img_augs.append(snake_case__ ) return img_augs class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase__ : Any = cfg.INPUT.FORMAT UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY UpperCAmelCase__ : str = cfg.PAD_VALUE UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std def __a ( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images] UpperCAmelCase__ : int = [ nn.functional.pad( snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case__ , snake_case__ ) ] return torch.stack(snake_case__ ), torch.tensor(snake_case__ ) def __call__( self : str , snake_case__ : int , snake_case__ : int=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [images] if single_image: assert len(snake_case__ ) == 1 for i in range(len(snake_case__ ) ): if 
isinstance(images[i] , torch.Tensor ): images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase__ : Tuple = self.aug(snake_case__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images] # now pad them to do the following operations UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int: '''simple docstring''' assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size tensor[:, 0].clamp_(min=0 , max=snake_case ) tensor[:, 1].clamp_(min=0 , max=snake_case ) tensor[:, 2].clamp_(min=0 , max=snake_case ) tensor[:, 3].clamp_(min=0 , max=snake_case )
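The shortest-edge resize arithmetic used above reduces to the following stand-alone sketch (the example input and target sizes are made up for illustration):

def shortest_edge_resize(h, w, size, max_size):
    # Scale so the shorter side equals `size`, then cap the longer side at `max_size`.
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        cap = max_size / max(newh, neww)
        newh, neww = newh * cap, neww * cap
    return int(newh + 0.5), int(neww + 0.5)

print(shortest_edge_resize(480, 640, 800, 1333))  # -> (800, 1067)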
298
0
"""simple docstring""" import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : Any=2 , snake_case__ : Optional[Any]=5_6 , snake_case__ : Tuple=True , snake_case__ : List[str]=True , snake_case__ : str=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=9_9 , snake_case__ : Optional[int]=3_2 , snake_case__ : List[str]=2 , snake_case__ : List[str]=2 , snake_case__ : Union[str, Any]=7 , snake_case__ : Optional[int]="gelu_new" , snake_case__ : Optional[Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Union[str, Any]=1_6 , snake_case__ : Optional[Any]=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Optional[Any]=4 , snake_case__ : str="block_sparse" , snake_case__ : Union[str, Any]=True , snake_case__ : Union[str, Any]=False , snake_case__ : Union[str, Any]=2 , snake_case__ : Dict=3 , ): '''simple docstring''' UpperCAmelCase__ : Dict = parent UpperCAmelCase__ : Optional[int] = batch_size UpperCAmelCase__ : List[str] = seq_length UpperCAmelCase__ : Optional[Any] = is_training UpperCAmelCase__ : List[str] = use_attention_mask UpperCAmelCase__ : List[Any] = use_token_type_ids UpperCAmelCase__ : Union[str, Any] = use_labels UpperCAmelCase__ : Tuple = vocab_size UpperCAmelCase__ : Dict = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : str = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase__ : int = max_position_embeddings UpperCAmelCase__ : Dict = type_vocab_size UpperCAmelCase__ : Optional[Any] = type_sequence_label_size UpperCAmelCase__ : Optional[Any] = initializer_range UpperCAmelCase__ : int = num_choices UpperCAmelCase__ : List[Any] = rescale_embeddings UpperCAmelCase__ : Tuple = attention_type UpperCAmelCase__ : Union[str, Any] = use_bias UpperCAmelCase__ : Optional[int] = block_size UpperCAmelCase__ : List[Any] = num_random_blocks def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : str = None if self.use_attention_mask: UpperCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : List[str] = None if self.use_token_type_ids: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : Any = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings 
, type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : int = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __a ( self : Union[str, Any] ): '''simple docstring''' super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __a ( self : List[Any] ): '''simple docstring''' super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __a ( self : Tuple ): '''simple docstring''' super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __a ( self : int ): '''simple docstring''' super().test_hidden_states_output() @slow def __a ( self : List[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(snake_case__ ) def __a ( self : Optional[int] ): '''simple docstring''' if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : List[str] = self._prepare_for_class(snake_case__ , snake_case__ ) UpperCAmelCase__ : str = model_class(snake_case__ ) @jax.jit def model_jitted(snake_case__ : str , snake_case__ : List[str]=None , **snake_case__ : Optional[int] ): return model(input_ids=snake_case__ , attention_mask=snake_case__ , **snake_case__ ) with self.subTest("JIT Enabled" ): UpperCAmelCase__ : List[str] = model_jitted(**snake_case__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase__ : List[str] = model_jitted(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) for jitted_output, output in zip(snake_case__ , snake_case__ ): self.assertEqual(jitted_output.shape , output.shape ) def __a ( self : Tuple , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any]=1e-5 , snake_case__ : str="outputs" , 
snake_case__ : Any=None ): '''simple docstring''' # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
370
"""simple docstring""" import qiskit def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts: '''simple docstring''' UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" ) UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = half_adder(1, 1) print(F"""Half Adder Output Qubit Counts: {counts}""")
298
0
"""simple docstring""" import sys def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = len(snake_case ) UpperCAmelCase__ : int = [[0 for x in range(snake_case )] for x in range(snake_case )] UpperCAmelCase__ : str = [[0 for x in range(snake_case )] for x in range(snake_case )] for chain_length in range(2 , snake_case ): for a in range(1 , n - chain_length + 1 ): UpperCAmelCase__ : Dict = a + chain_length - 1 UpperCAmelCase__ : Optional[Any] = sys.maxsize for c in range(snake_case , snake_case ): UpperCAmelCase__ : Optional[Any] = ( matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) if cost < matrix[a][b]: UpperCAmelCase__ : Optional[Any] = cost UpperCAmelCase__ : Any = c return matrix, sol def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : Optional[int] , snake_case : Optional[int] )-> Any: '''simple docstring''' if i == j: print("A" + str(snake_case ) , end=" " ) else: print("(" , end=" " ) print_optiomal_solution(snake_case , snake_case , optimal_solution[i][j] ) print_optiomal_solution(snake_case , optimal_solution[i][j] + 1 , snake_case ) print(")" , end=" " ) def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' UpperCAmelCase__ : Optional[int] = [30, 35, 15, 5, 10, 20, 25] UpperCAmelCase__ : Optional[int] = len(snake_case ) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 UpperCAmelCase__ : Dict = matrix_chain_order(snake_case ) print("No. of Operation required: " + str(matrix[1][n - 1] ) ) print_optiomal_solution(snake_case , 1 , n - 1 ) if __name__ == "__main__": main()
371
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''efficientformer''' def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[str] = hidden_sizes UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Optional[int] = patch_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : Optional[int] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Dict = downsamples UpperCAmelCase__ : Any = dim UpperCAmelCase__ : str = key_dim UpperCAmelCase__ : List[Any] = attention_ratio UpperCAmelCase__ : Optional[Any] = resolution UpperCAmelCase__ : Optional[Any] = pool_size UpperCAmelCase__ : Any = downsample_patch_size UpperCAmelCase__ : int = downsample_stride UpperCAmelCase__ : Dict = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[str] = distillation UpperCAmelCase__ : Dict = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Optional[int] = batch_norm_eps
298
0
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } _lowerCAmelCase : List[Any] = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } _lowerCAmelCase : int = { """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def SCREAMING_SNAKE_CASE__ ( snake_case )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = set() UpperCAmelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Dict = char UpperCAmelCase__ : Tuple = set(snake_case ) return pairs class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : Dict = vocab_file UpperCAmelCase__ : Tuple = merges_file UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Dict = 3 self.add_from_file(snake_case__ ) UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1] UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges] UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Dict = {} def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] UpperCAmelCase__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def __a ( self : Union[str, Any] , snake_case__ 
: List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Tuple = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : List[str] ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : Dict , snake_case__ : Tuple ): '''simple docstring''' if token in self.cache: return self.cache[token] UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) UpperCAmelCase__ : Any = get_pairs(snake_case__ ) if not pairs: return token while True: UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ : Tuple = bigram UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : Tuple = 0 while i < len(snake_case__ ): try: UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : Dict = j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Dict = tuple(snake_case__ ) UpperCAmelCase__ : List[Any] = new_word if len(snake_case__ ) == 1: break else: UpperCAmelCase__ : Dict = get_pairs(snake_case__ ) UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ ) UpperCAmelCase__ : Optional[int] = word[:-4] UpperCAmelCase__ : Union[str, Any] = word return word def __a ( self : List[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : int = re.findall(R"\S+\n?" 
, snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def __a ( self : Dict , snake_case__ : List[str] ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def __a ( self : List[Any] , snake_case__ : Any ): '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def __a ( self : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : Tuple = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : str = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ): copyfile(self.merges_file , snake_case__ ) return out_vocab_file, out_merge_file def __a ( self : List[Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): try: with open(snake_case__ , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' ) return UpperCAmelCase__ : Dict = f.readlines() for lineTmp in lines: UpperCAmelCase__ : Optional[int] = lineTmp.strip() UpperCAmelCase__ : Tuple = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) UpperCAmelCase__ : Any = line[:idx] UpperCAmelCase__ : str = len(self.encoder )
350
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
298
0
"""simple docstring""" from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''Salesforce/blip-image-captioning-base''' SCREAMING_SNAKE_CASE_ =( '''This is a tool that generates a description of an image. It takes an input named `image` which should be the ''' '''image to caption, and returns a text that contains the description in English.''' ) SCREAMING_SNAKE_CASE_ ='''image_captioner''' SCREAMING_SNAKE_CASE_ =AutoModelForVisionaSeq SCREAMING_SNAKE_CASE_ =['''image'''] SCREAMING_SNAKE_CASE_ =['''text'''] def __init__( self : str , *snake_case__ : Dict , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(self , ["vision"] ) super().__init__(*snake_case__ , **snake_case__ ) def __a ( self : Tuple , snake_case__ : "Image" ): '''simple docstring''' return self.pre_processor(images=snake_case__ , return_tensors="pt" ) def __a ( self : List[Any] , snake_case__ : Optional[int] ): '''simple docstring''' return self.model.generate(**snake_case__ ) def __a ( self : str , snake_case__ : str ): '''simple docstring''' return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0].strip()
351
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False 
SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), 
BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] 
= torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] 
= model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
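# A one-line sanity check (an addition, not from the test file) of the sequence-length
# bookkeeping the tester above relies on: with the default image_size=30 and
# patch_size=2, there are (30 // 2) ** 2 = 225 patches plus one [CLS] token.
assert (30 // 2) ** 2 + 1 == 226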
298
0
"""simple docstring""" import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch _lowerCAmelCase : List[str] = logging.get_logger(__name__) @dataclass class lowerCAmelCase__ : def __init__( self : Optional[Any] , snake_case__ : Optional[int]=False , snake_case__ : int=False , snake_case__ : Any=6.0 , snake_case__ : List[str]=None , snake_case__ : Any=False , snake_case__ : Optional[int]=False , snake_case__ : Optional[int]=None , snake_case__ : List[Any]="fp4" , snake_case__ : Optional[Any]=False , **snake_case__ : Tuple , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = load_in_abit UpperCAmelCase__ : Tuple = load_in_abit UpperCAmelCase__ : List[Any] = llm_inta_threshold UpperCAmelCase__ : Optional[int] = llm_inta_skip_modules UpperCAmelCase__ : Any = llm_inta_enable_fpaa_cpu_offload UpperCAmelCase__ : Union[str, Any] = llm_inta_has_fpaa_weight UpperCAmelCase__ : Dict = bnb_abit_quant_type UpperCAmelCase__ : List[str] = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: UpperCAmelCase__ : List[str] = torch.floataa elif isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : str = getattr(snake_case__ , snake_case__ ) elif isinstance(snake_case__ , torch.dtype ): UpperCAmelCase__ : Optional[int] = bnb_abit_compute_dtype else: raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype" ) self.post_init() def __a ( self : Union[str, Any] ): '''simple docstring''' if not isinstance(self.llm_inta_threshold , snake_case__ ): raise ValueError("llm_int8_threshold must be a float" ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , snake_case__ ): raise ValueError("llm_int8_skip_modules must be a list of strings" ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , snake_case__ ): raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean" ) if not isinstance(self.llm_inta_has_fpaa_weight , snake_case__ ): raise ValueError("llm_int8_has_fp16_weight must be a boolean" ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError("bnb_4bit_compute_dtype must be torch.dtype" ) if not isinstance(self.bnb_abit_quant_type , snake_case__ ): raise ValueError("bnb_4bit_quant_type must be a string" ) if not isinstance(self.bnb_abit_use_double_quant , snake_case__ ): raise ValueError("bnb_4bit_use_double_quant must be a boolean" ) if self.load_in_abit and not version.parse(importlib.metadata.version("bitsandbytes" ) ) >= version.parse( "0.39.0" ): raise ValueError( "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" ) def __a ( self : Union[str, Any] ): '''simple docstring''' return self.load_in_abit or self.load_in_abit def __a ( self : Union[str, Any] ): '''simple docstring''' if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def __a ( cls : str , snake_case__ : str , snake_case__ : int , **snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = cls(**snake_case__ ) UpperCAmelCase__ : Tuple = [] for key, value in kwargs.items(): if hasattr(snake_case__ , snake_case__ ): setattr(snake_case__ , snake_case__ , 
snake_case__ ) to_remove.append(snake_case__ ) for key in to_remove: kwargs.pop(snake_case__ , snake_case__ ) if return_unused_kwargs: return config, kwargs else: return config def __a ( self : str , snake_case__ : Union[str, os.PathLike] ): '''simple docstring''' with open(snake_case__ , "w" , encoding="utf-8" ) as writer: UpperCAmelCase__ : Dict = self.to_dict() UpperCAmelCase__ : int = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + "\n" writer.write(snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Tuple = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : List[str] = str(output["bnb_4bit_compute_dtype"] ).split("." )[1] return output def __repr__( self : List[str] ): '''simple docstring''' return f'{self.__class__.__name__} {self.to_json_string()}' def __a ( self : List[Any] , snake_case__ : bool = True ): '''simple docstring''' if use_diff is True: UpperCAmelCase__ : Any = self.to_diff_dict() else: UpperCAmelCase__ : Optional[int] = self.to_dict() return json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + "\n" def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.to_dict() # get the default config dict UpperCAmelCase__ : List[str] = BitsAndBytesConfig().to_dict() UpperCAmelCase__ : str = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: UpperCAmelCase__ : Any = value return serializable_config_dict
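# A minimal sketch (an addition; `consume_kwargs` is an illustrative name) of the
# kwargs-consumption pattern used by the classmethod above: kwargs matching existing
# config attributes are absorbed, and only the leftovers are handed back to the caller.
def consume_kwargs(config, kwargs):
    to_remove = []
    for key, value in kwargs.items():
        if hasattr(config, key):
            setattr(config, key, value)  # absorb the kwarg into the config
            to_remove.append(key)
    for key in to_remove:
        kwargs.pop(key, None)
    return config, kwargs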
352
"""simple docstring""" import functools def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int: '''simple docstring''' UpperCAmelCase__ : List[str] = len(snake_case ) UpperCAmelCase__ : str = len(snake_case ) @functools.cache def min_distance(snake_case : int , snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , snake_case ) , 1 + min_distance(snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
298
0
"""simple docstring""" import math def SCREAMING_SNAKE_CASE__ ( snake_case : list , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : str = len(snake_case ) UpperCAmelCase__ : List[str] = int(math.floor(math.sqrt(snake_case ) ) ) UpperCAmelCase__ : Union[str, Any] = 0 while arr[min(snake_case , snake_case ) - 1] < x: UpperCAmelCase__ : Union[str, Any] = step step += int(math.floor(math.sqrt(snake_case ) ) ) if prev >= n: return -1 while arr[prev] < x: UpperCAmelCase__ : int = prev + 1 if prev == min(snake_case , snake_case ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": _lowerCAmelCase : List[str] = input("""Enter numbers separated by a comma:\n""").strip() _lowerCAmelCase : Tuple = [int(item) for item in user_input.split(""",""")] _lowerCAmelCase : Tuple = int(input("""Enter the number to be searched:\n""")) _lowerCAmelCase : str = jump_search(arr, x) if res == -1: print("""Number not found!""") else: print(F"""Number {x} is at index {res}""")
353
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : List[Any] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) UpperCAmelCase__ : Tuple = input_file.read() UpperCAmelCase__ : Tuple = regexp.search(snake_case__ ) return match def __a ( self : List[str] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) UpperCAmelCase__ : Dict = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase__ : int = regexp.finditer(snake_case__ ) UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Path("./datasets" ) UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(snake_case__ ) ): raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = Path("./datasets" ) UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(snake_case__ ) ): raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
298
0
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : Optional[Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 3 UpperCAmelCase__ : List[Any] = 2_5_0 UpperCAmelCase__ : List[str] = ids_tensor((batch_size, length) , snake_case__ ) UpperCAmelCase__ : Any = torch.ones((batch_size, length) , device=snake_case__ , dtype=torch.float ) / length return input_ids, scores def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self._get_tensors(5 ) UpperCAmelCase__ : List[Any] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(snake_case__ , snake_case__ ) ) UpperCAmelCase__ : int = self._get_tensors(9 ) self.assertFalse(criteria(snake_case__ , snake_case__ ) ) UpperCAmelCase__ : int = self._get_tensors(1_0 ) self.assertTrue(criteria(snake_case__ , snake_case__ ) ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = MaxLengthCriteria(max_length=1_0 ) UpperCAmelCase__ : Optional[int] = self._get_tensors(5 ) self.assertFalse(criteria(snake_case__ , snake_case__ ) ) UpperCAmelCase__ : Union[str, Any] = self._get_tensors(9 ) self.assertFalse(criteria(snake_case__ , snake_case__ ) ) UpperCAmelCase__ : Optional[int] = self._get_tensors(1_0 ) self.assertTrue(criteria(snake_case__ , snake_case__ ) ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) UpperCAmelCase__ : Optional[int] = self._get_tensors(5 ) self.assertFalse(criteria(snake_case__ , snake_case__ ) ) UpperCAmelCase__ : List[Any] = self._get_tensors(9 ) self.assertFalse(criteria(snake_case__ , snake_case__ ) ) UpperCAmelCase__ : Any = self._get_tensors(1_0 ) self.assertTrue(criteria(snake_case__ , snake_case__ ) ) UpperCAmelCase__ : List[str] = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 1_0 ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = self._get_tensors(5 ) UpperCAmelCase__ : Union[str, Any] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(snake_case__ , snake_case__ ) ) UpperCAmelCase__ : Union[str, Any] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(snake_case__ , snake_case__ ) ) def __a ( self : str ): '''simple docstring''' validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 ) with self.assertWarns(snake_case__ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 ) UpperCAmelCase__ : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 1_1 ) self.assertEqual(len(snake_case__ ) , 1 )
354
"""simple docstring""" import numpy as np import datasets _lowerCAmelCase : Optional[int] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ _lowerCAmelCase : Tuple = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ _lowerCAmelCase : Optional[int] = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ): '''simple docstring''' # convert to numpy arrays UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ ) UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T ) try: UpperCAmelCase__ : str = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ ) UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ ) UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
298
0
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict , snake_case : Tuple , snake_case : str )-> List[Any]: '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: UpperCAmelCase__ : List[Any] = mf_knapsack(i - 1 , snake_case , snake_case , snake_case ) else: UpperCAmelCase__ : Tuple = max( mf_knapsack(i - 1 , snake_case , snake_case , snake_case ) , mf_knapsack(i - 1 , snake_case , snake_case , j - wt[i - 1] ) + val[i - 1] , ) UpperCAmelCase__ : str = val return f[i][j] def SCREAMING_SNAKE_CASE__ ( snake_case : Dict , snake_case : List[str] , snake_case : Optional[Any] , snake_case : List[Any] )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : Dict = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: UpperCAmelCase__ : int = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: UpperCAmelCase__ : List[str] = dp[i - 1][w_] return dp[n][w_], dp def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : list , snake_case : list )-> Union[str, Any]: '''simple docstring''' if not (isinstance(snake_case , (list, tuple) ) and isinstance(snake_case , (list, tuple) )): raise ValueError( "Both the weights and values vectors must be either lists or tuples" ) UpperCAmelCase__ : str = len(snake_case ) if num_items != len(snake_case ): UpperCAmelCase__ : List[str] = ( "The number of weights must be the same as the number of values.\n" f'But got {num_items} weights and {len(snake_case )} values' ) raise ValueError(snake_case ) for i in range(snake_case ): if not isinstance(wt[i] , snake_case ): UpperCAmelCase__ : Union[str, Any] = ( "All weights must be integers but got weight of " f'type {type(wt[i] )} at index {i}' ) raise TypeError(snake_case ) UpperCAmelCase__ : str = knapsack(snake_case , snake_case , snake_case , snake_case ) UpperCAmelCase__ : set = set() _construct_solution(snake_case , snake_case , snake_case , snake_case , snake_case ) return optimal_val, example_optional_set def SCREAMING_SNAKE_CASE__ ( snake_case : list , snake_case : list , snake_case : int , snake_case : int , snake_case : set )-> Any: '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(snake_case , snake_case , i - 1 , snake_case , snake_case ) else: optimal_set.add(snake_case ) _construct_solution(snake_case , snake_case , i - 1 , j - wt[i - 1] , snake_case ) if __name__ == "__main__": _lowerCAmelCase : List[str] = [3, 2, 4, 4] _lowerCAmelCase : List[str] = [4, 3, 2, 3] _lowerCAmelCase : str = 4 _lowerCAmelCase : Union[str, Any] = 6 _lowerCAmelCase : Union[str, Any] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] _lowerCAmelCase : Tuple = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 _lowerCAmelCase : str = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
355
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =IFPipeline SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __a ( self : Dict ): '''simple docstring''' return self._get_dummy_components() def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): UpperCAmelCase__ : str = torch.manual_seed(snake_case__ ) else: UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase__ : Tuple = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self : Tuple ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self : Tuple ): '''simple docstring''' # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __a ( self : Dict ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __a ( self : int ): '''simple docstring''' self._test_save_load_local() def __a ( self : Any ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self : Optional[Any] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : str ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Tuple ): '''simple docstring''' # if UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : List[Any] = None pipe_a.enable_model_cpu_offload() 
pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : List[Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : str = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Tuple = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : str = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : 
Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Optional[Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : int = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Tuple = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' 
torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
298
0
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> list: '''simple docstring''' return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(snake_case ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("""doctest""").testmod()
356
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } _lowerCAmelCase : List[Any] = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } _lowerCAmelCase : int = { """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = set() UpperCAmelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Dict = char UpperCAmelCase__ : Tuple = set(snake_case ) return pairs class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : Dict = vocab_file UpperCAmelCase__ : Tuple = merges_file UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Dict = 3 self.add_from_file(snake_case__ ) UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1] UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges] UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Dict = {} def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] UpperCAmelCase__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def __a ( self : Union[str, Any] , 
snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Tuple = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : List[str] ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : Dict , snake_case__ : Tuple ): '''simple docstring''' if token in self.cache: return self.cache[token] UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) UpperCAmelCase__ : Any = get_pairs(snake_case__ ) if not pairs: return token while True: UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : Tuple = 0 while i < len(snake_case__ ): try: UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : Dict = j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Dict = tuple(snake_case__ ) UpperCAmelCase__ : List[Any] = new_word if len(snake_case__ ) == 1: break else: UpperCAmelCase__ : Dict = get_pairs(snake_case__ ) UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ ) UpperCAmelCase__ : Optional[int] = word[:-4] UpperCAmelCase__ : Union[str, Any] = word return word def __a ( self : List[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : int = re.findall(R"\S+\n?" 
, snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def __a ( self : Dict , snake_case__ : List[str] ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def __a ( self : List[Any] , snake_case__ : Any ): '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def __a ( self : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : Tuple = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : str = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ): copyfile(self.merges_file , snake_case__ ) return out_vocab_file, out_merge_file def __a ( self : List[Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): try: with open(snake_case__ , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' ) return UpperCAmelCase__ : Dict = f.readlines() for lineTmp in lines: UpperCAmelCase__ : Optional[int] = lineTmp.strip() UpperCAmelCase__ : Tuple = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) UpperCAmelCase__ : Any = line[:idx] UpperCAmelCase__ : str = len(self.encoder )
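# A self-contained sketch (an addition; `get_pairs_sketch` is an illustrative name) of
# the pair extraction that drives the BPE loop above: the set of adjacent symbol pairs
# in a word, from which the lowest-ranked pair is merged on each iteration.
def get_pairs_sketch(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert get_pairs_sketch(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}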
298
0
"""simple docstring""" import functools def SCREAMING_SNAKE_CASE__ ( snake_case : list[int] , snake_case : list[int] )-> int: '''simple docstring''' if not isinstance(snake_case , snake_case ) or not all(isinstance(snake_case , snake_case ) for day in days ): raise ValueError("The parameter days should be a list of integers" ) if len(snake_case ) != 3 or not all(isinstance(snake_case , snake_case ) for cost in costs ): raise ValueError("The parameter costs should be a list of three integers" ) if len(snake_case ) == 0: return 0 if min(snake_case ) <= 0: raise ValueError("All days elements should be greater than 0" ) if max(snake_case ) >= 366: raise ValueError("All days elements should be less than 366" ) UpperCAmelCase__ : List[str] = set(snake_case ) @functools.cache def dynamic_programming(snake_case : int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
357
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 # setable values SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None @classmethod def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ ) @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ =42 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return True @register_to_config def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ): '''simple docstring''' UpperCAmelCase__ : Tuple = dtype def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ): '''simple docstring''' if common is None: UpperCAmelCase__ : Any = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype ) UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , ) def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ): '''simple docstring''' return sample def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=snake_case__ , timesteps=snake_case__ , ) def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: UpperCAmelCase__ : Union[str, 
Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) ) elif variance_type == "fixed_large": UpperCAmelCase__ : List[Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": UpperCAmelCase__ : List[str] = variance UpperCAmelCase__ : Optional[Any] = state.common.betas[t] UpperCAmelCase__ : Any = (predicted_variance + 1) / 2 UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if key is None: UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 ) else: UpperCAmelCase__ : int = None # 1. compute alphas, betas UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) UpperCAmelCase__ : List[str] = 1 - alpha_prod_t UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 ) UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ ) def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __len__( self : Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
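# The posterior-mean step of the scheduler above, restated as a plain-Python sketch
# (an addition; formula (7) of https://arxiv.org/pdf/2006.11239.pdf, using the same
# coefficient definitions as the `step` method).
def posterior_mean_sketch(pred_x0, sample, alpha_prod_t, alpha_prod_t_prev, beta_t, alpha_t):
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    coeff_x0 = (alpha_prod_t_prev ** 0.5 * beta_t) / beta_prod_t   # weight on "predicted x_0"
    coeff_xt = alpha_t ** 0.5 * beta_prod_t_prev / beta_prod_t     # weight on current sample x_t
    return coeff_x0 * pred_x0 + coeff_xt * sample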
298
0
"""simple docstring""" import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : int , snake_case : Optional[int] , snake_case : Optional[int] )-> str: '''simple docstring''' if isinstance(snake_case , snake_case ): UpperCAmelCase__ : Dict = np.full((len(snake_case ), sequence_length, 2) , snake_case ) else: UpperCAmelCase__ : List[str] = np.full((len(snake_case ), sequence_length) , snake_case ) for i, tensor in enumerate(snake_case ): if padding_side == "right": if isinstance(snake_case , snake_case ): UpperCAmelCase__ : List[str] = tensor[:sequence_length] else: UpperCAmelCase__ : Dict = tensor[:sequence_length] else: if isinstance(snake_case , snake_case ): UpperCAmelCase__ : Tuple = tensor[:sequence_length] else: UpperCAmelCase__ : Tuple = tensor[:sequence_length] return out_tensor.tolist() def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' UpperCAmelCase__ : Optional[int] = ord(snake_case ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True UpperCAmelCase__ : Optional[int] = unicodedata.category(snake_case ) if cat.startswith("P" ): return True return False @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =True SCREAMING_SNAKE_CASE_ =None SCREAMING_SNAKE_CASE_ =None SCREAMING_SNAKE_CASE_ =-100 SCREAMING_SNAKE_CASE_ ='''pt''' def __a ( self : Union[str, Any] , snake_case__ : Any ): '''simple docstring''' import torch UpperCAmelCase__ : List[Any] = "label" if "label" in features[0].keys() else "labels" UpperCAmelCase__ : int = [feature[label_name] for feature in features] if label_name in features[0].keys() else None UpperCAmelCase__ : Any = self.tokenizer.pad( snake_case__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , ) if labels is None: return batch UpperCAmelCase__ : Any = torch.tensor(batch["entity_ids"] ).shape[1] UpperCAmelCase__ : List[Any] = self.tokenizer.padding_side if padding_side == "right": UpperCAmelCase__ : List[str] = [ list(snake_case__ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case__ )) for label in labels ] else: UpperCAmelCase__ : str = [ [self.label_pad_token_id] * (sequence_length - len(snake_case__ )) + list(snake_case__ ) for label in labels ] UpperCAmelCase__ : Dict = [feature["ner_tags"] for feature in features] UpperCAmelCase__ : str = padding_tensor(snake_case__ , -1 , snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[int] = [feature["original_entity_spans"] for feature in features] UpperCAmelCase__ : str = padding_tensor(snake_case__ , (-1, -1) , snake_case__ , snake_case__ ) UpperCAmelCase__ : Union[str, Any] = {k: torch.tensor(snake_case__ , dtype=torch.intaa ) for k, v in batch.items()} return batch
358
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCAmelCase__ : def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : Union[str, Any] = act_dim UpperCAmelCase__ : Dict = state_dim UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : List[str] = max_length UpperCAmelCase__ : int = is_training def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 ) UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase__ : Optional[int] = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __a ( self : int ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase__ : 
Optional[int] = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =() SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids SCREAMING_SNAKE_CASE_ =False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = DecisionTransformerModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : List[str] ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Tuple = [*signature.parameters.keys()] UpperCAmelCase__ : str = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Optional[int] = model.config torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset() UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ ) UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase__ : Union[str, Any] = state UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Any = torch.zeros(1 , 0 , 
device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 ) for step in range(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model( states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1] UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 ) UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase__ : Tuple = torch.cat( [timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
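The rollout above keeps the conditioning target fresh by subtracting each observed reward from the previous returns-to-go value. For completeness, the initial returns-to-go sequence is conventionally the undiscounted suffix sum of the reward trajectory; a small illustrative helper, not part of transformers:

def returns_to_go(rewards):
    rtg, running = [], 0.0
    for r in reversed(rewards):  # accumulate from the end of the episode
        running += r
        rtg.append(running)
    return rtg[::-1]             # rtg[t] = rewards[t] + rewards[t+1] + ...

returns_to_go([1.0, 0.5, 2.0])  # [3.5, 2.5, 2.0]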
298
0
"""simple docstring""" import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow _lowerCAmelCase : Tuple = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ """text-classification""", """language-modeling""", """summarization""", """token-classification""", """question-answering""", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase : Union[str, Any] = logging.getLogger() def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase__ : str = parser.parse_args() return args.f def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Union[str, Any]="eval" )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : str = os.path.join(snake_case , f'{split}_results.json' ) if os.path.exists(snake_case ): with open(snake_case , "r" ) as f: return json.load(snake_case ) raise ValueError(f'can\'t find {path}' ) _lowerCAmelCase : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.get_auto_remove_tmp_dir() UpperCAmelCase__ : Tuple = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split() with patch.object(snake_case__ , "argv" , snake_case__ ): run_flax_glue.main() UpperCAmelCase__ : List[Any] = get_results(snake_case__ ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) @slow def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.get_auto_remove_tmp_dir() UpperCAmelCase__ : Union[str, Any] = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split() with patch.object(snake_case__ , "argv" , snake_case__ ): run_clm_flax.main() UpperCAmelCase__ : Dict = get_results(snake_case__ ) self.assertLess(result["eval_perplexity"] , 1_0_0 ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_auto_remove_tmp_dir() UpperCAmelCase__ : int = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split() with patch.object(snake_case__ , "argv" , 
snake_case__ ): run_summarization_flax.main() UpperCAmelCase__ : str = get_results(snake_case__ , split="test" ) self.assertGreaterEqual(result["test_rouge1"] , 1_0 ) self.assertGreaterEqual(result["test_rouge2"] , 2 ) self.assertGreaterEqual(result["test_rougeL"] , 7 ) self.assertGreaterEqual(result["test_rougeLsum"] , 7 ) @slow def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_auto_remove_tmp_dir() UpperCAmelCase__ : int = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split() with patch.object(snake_case__ , "argv" , snake_case__ ): run_mlm_flax.main() UpperCAmelCase__ : List[str] = get_results(snake_case__ ) self.assertLess(result["eval_perplexity"] , 4_2 ) @slow def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_auto_remove_tmp_dir() UpperCAmelCase__ : str = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split() with patch.object(snake_case__ , "argv" , snake_case__ ): run_ta_mlm_flax.main() UpperCAmelCase__ : int = get_results(snake_case__ ) self.assertGreaterEqual(result["eval_accuracy"] , 0.42 ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = 7 if get_gpu_count() > 1 else 2 UpperCAmelCase__ : Any = self.get_auto_remove_tmp_dir() UpperCAmelCase__ : Tuple = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split() with patch.object(snake_case__ , "argv" , snake_case__ ): run_flax_ner.main() UpperCAmelCase__ : int = get_results(snake_case__ ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertGreaterEqual(result["eval_f1"] , 0.3 ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = self.get_auto_remove_tmp_dir() UpperCAmelCase__ : Dict = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split() with patch.object(snake_case__ , "argv" , snake_case__ ): run_qa.main() UpperCAmelCase__ : int = get_results(snake_case__ ) self.assertGreaterEqual(result["eval_f1"] , 3_0 ) self.assertGreaterEqual(result["eval_exact"] , 3_0 )
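Every test above drives an example script the same way: build an argv list, patch `sys.argv`, and call the script's `main()` so its internal argparse sees the patched arguments. The bare pattern, with a hypothetical `my_script` module standing in for the example scripts:

import sys
from unittest.mock import patch

import my_script  # hypothetical: any module whose main() parses sys.argv

testargs = "my_script.py --output_dir /tmp/out --seed 42".split()
with patch.object(sys, "argv", testargs):
    my_script.main()  # argparse inside main() now sees the patched argv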
359
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
298
0
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : list , snake_case : list , snake_case : int , snake_case : int , snake_case : int )-> int: '''simple docstring''' if index == number_of_items: return 0 UpperCAmelCase__ : Any = 0 UpperCAmelCase__ : int = 0 UpperCAmelCase__ : Tuple = knapsack(snake_case , snake_case , snake_case , snake_case , index + 1 ) if weights[index] <= max_weight: UpperCAmelCase__ : Union[str, Any] = values[index] + knapsack( snake_case , snake_case , snake_case , max_weight - weights[index] , index + 1 ) return max(snake_case , snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
360
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
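`_add_variant`, defined just before `_get_model_file` above, splices the variant tag in front of the final extension. Worked examples of the split/join (file names illustrative):

_add_variant("diffusion_pytorch_model.bin", "fp16")
# -> "diffusion_pytorch_model.fp16.bin"  (split on ".", insert the variant before the last segment)

_add_variant("diffusion_pytorch_model.bin", None)
# -> "diffusion_pytorch_model.bin"       (no variant: the name passes through unchanged)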
298
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Any = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''mobilenet_v2''' def __init__( self : Optional[int] , snake_case__ : List[str]=3 , snake_case__ : str=2_2_4 , snake_case__ : Dict=1.0 , snake_case__ : List[str]=8 , snake_case__ : List[Any]=8 , snake_case__ : Tuple=6 , snake_case__ : Optional[Any]=3_2 , snake_case__ : List[str]=True , snake_case__ : Dict=True , snake_case__ : Union[str, Any]="relu6" , snake_case__ : str=True , snake_case__ : List[str]=0.8 , snake_case__ : Optional[Any]=0.02 , snake_case__ : Union[str, Any]=0.001 , snake_case__ : List[str]=2_5_5 , **snake_case__ : Any , ): '''simple docstring''' super().__init__(**snake_case__ ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) UpperCAmelCase__ : Union[str, Any] = num_channels UpperCAmelCase__ : List[str] = image_size UpperCAmelCase__ : Optional[int] = depth_multiplier UpperCAmelCase__ : Optional[Any] = depth_divisible_by UpperCAmelCase__ : Optional[Any] = min_depth UpperCAmelCase__ : List[Any] = expand_ratio UpperCAmelCase__ : List[Any] = output_stride UpperCAmelCase__ : Optional[int] = first_layer_is_expansion UpperCAmelCase__ : Any = finegrained_output UpperCAmelCase__ : Optional[int] = hidden_act UpperCAmelCase__ : int = tf_padding UpperCAmelCase__ : List[Any] = classifier_dropout_prob UpperCAmelCase__ : int = initializer_range UpperCAmelCase__ : List[str] = layer_norm_eps UpperCAmelCase__ : Any = semantic_loss_ignore_index class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =version.parse('''1.11''' ) @property def __a ( self : int ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __a ( self : Dict ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __a ( self : List[str] ): '''simple docstring''' return 1e-4
361
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy() UpperCAmelCase__ : List[Any] = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
298
0
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =XLMTokenizer SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : Optional[int] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(snake_case__ ) ) def __a ( self : Union[str, Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = "lower newer" UpperCAmelCase__ : Optional[Any] = "lower newer" return input_text, output_text def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : List[Any] = "lower" UpperCAmelCase__ : Any = ["low", "er</w>"] UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"] UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
362
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : int = seq_length UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : Union[str, Any] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Any = embedding_size UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : List[Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : Union[str, Any] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Tuple = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : Union[str, Any] = scope def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Optional[int] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : int = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = AlbertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self 
: Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_labels UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = self.num_labels UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_choices UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Tuple = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =True def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ): '''simple docstring''' UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , 
snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Dict = type self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" ) UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case__ ) UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
298
0
"""simple docstring""" import importlib.metadata import operator import re import sys from typing import Optional from packaging import version _lowerCAmelCase : Any = { """<""": operator.lt, """<=""": operator.le, """==""": operator.eq, """!=""": operator.ne, """>=""": operator.ge, """>""": operator.gt, } def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Dict , snake_case : Union[str, Any] , snake_case : Optional[Any] )-> Optional[Any]: '''simple docstring''' if got_ver is None or want_ver is None: raise ValueError( f'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider' f' reinstalling {pkg}.' ) if not ops[op](version.parse(snake_case ) , version.parse(snake_case ) ): raise ImportError( f'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}' ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> None: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'\n{hint}' if hint is not None else "" # non-versioned check if re.match(r"^[\w_\-\d]+$" , snake_case ): UpperCAmelCase__ : Tuple = requirement, None, None else: UpperCAmelCase__ : str = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , snake_case ) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" f' got {requirement}' ) UpperCAmelCase__ : List[Any] = match[0] UpperCAmelCase__ : Union[str, Any] = want_full.split("," ) # there could be multiple requirements UpperCAmelCase__ : List[str] = {} for w in want_range: UpperCAmelCase__ : Optional[Any] = re.findall(r"^([\s!=<>]{1,2})(.+)" , snake_case ) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," f' but got {requirement}' ) UpperCAmelCase__ : str = match[0] UpperCAmelCase__ : str = want_ver if op not in ops: raise ValueError(f'{requirement}: need one of {list(ops.keys() )}, but got {op}' ) # special case if pkg == "python": UpperCAmelCase__ : Optional[int] = ".".join([str(snake_case ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) return # check if any version is installed try: UpperCAmelCase__ : Optional[Any] = importlib.metadata.version(snake_case ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f'The \'{requirement}\' distribution was not found and is required by this application. {hint}' ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main" return require_version(snake_case , snake_case )
363
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = list(range(snake_case ) ) # Find permutation while factorials: UpperCAmelCase__ : str = factorials.pop() UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
298
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = tempfile.mkdtemp() # fmt: off UpperCAmelCase__ : List[str] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on UpperCAmelCase__ : Dict = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] UpperCAmelCase__ : Optional[int] = {"unk_token": "<unk>"} UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) UpperCAmelCase__ : Union[str, Any] = { "do_resize": True, "size": 2_0, "do_center_crop": True, "crop_size": 1_8, "do_normalize": True, "image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073], "image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711], } UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , snake_case__ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(snake_case__ , snake_case__ ) def __a ( self : Dict , **snake_case__ : Any ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **snake_case__ ) def __a ( self : Tuple , **snake_case__ : Any ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" 
, **snake_case__ ) def __a ( self : Optional[Any] , **snake_case__ : Optional[Any] ): '''simple docstring''' return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] UpperCAmelCase__ : List[str] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_tokenizer() UpperCAmelCase__ : int = self.get_rust_tokenizer() UpperCAmelCase__ : Dict = self.get_image_processor() UpperCAmelCase__ : str = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ ) UpperCAmelCase__ : Union[str, Any] = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case__ ) self.assertIsInstance(processor_fast.tokenizer , snake_case__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case__ ) self.assertIsInstance(processor_fast.image_processor , snake_case__ ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Optional[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCAmelCase__ : int = self.get_image_processor(do_normalize=snake_case__ ) UpperCAmelCase__ : Optional[int] = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.get_image_processor() UpperCAmelCase__ : Dict = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase__ : str = self.prepare_image_inputs() UpperCAmelCase__ : Optional[Any] = image_processor(snake_case__ , return_tensors="np" ) UpperCAmelCase__ : int = processor(images=snake_case__ , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 
self.get_image_processor() UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : List[str] = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase__ : List[Any] = "lower newer" UpperCAmelCase__ : Union[str, Any] = processor(text=snake_case__ , return_tensors="np" ) UpperCAmelCase__ : Optional[Any] = tokenizer(snake_case__ , return_tensors="np" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_image_processor() UpperCAmelCase__ : List[str] = self.get_tokenizer() UpperCAmelCase__ : List[Any] = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase__ : Optional[int] = "lower newer" UpperCAmelCase__ : Optional[int] = self.prepare_image_inputs() UpperCAmelCase__ : Optional[Any] = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = "google/owlvit-base-patch32" UpperCAmelCase__ : Union[str, Any] = OwlViTProcessor.from_pretrained(snake_case__ ) UpperCAmelCase__ : Tuple = ["cat", "nasa badge"] UpperCAmelCase__ : Any = processor(text=snake_case__ ) UpperCAmelCase__ : Dict = 1_6 self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = "google/owlvit-base-patch32" UpperCAmelCase__ : Dict = OwlViTProcessor.from_pretrained(snake_case__ ) UpperCAmelCase__ : List[str] = [["cat", "nasa badge"], ["person"]] UpperCAmelCase__ : Union[str, Any] = processor(text=snake_case__ ) UpperCAmelCase__ : Any = 1_6 UpperCAmelCase__ : Optional[Any] = len(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = max([len(snake_case__ ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = "google/owlvit-base-patch32" UpperCAmelCase__ : List[Any] = OwlViTProcessor.from_pretrained(snake_case__ ) UpperCAmelCase__ : Dict = ["cat", "nasa badge"] UpperCAmelCase__ : Tuple = processor(text=snake_case__ ) UpperCAmelCase__ : Union[str, Any] = 1_6 UpperCAmelCase__ : List[Any] = inputs["input_ids"] UpperCAmelCase__ : str = [ [4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_image_processor() UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = 
OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase__ : List[str] = self.prepare_image_inputs() UpperCAmelCase__ : Optional[Any] = self.prepare_image_inputs() UpperCAmelCase__ : int = processor(images=snake_case__ , query_images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_image_processor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : Dict = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase__ : str = processor.batch_decode(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ )
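# --- Illustrative usage sketch (not from the original test file) ---
# Same public "google/owlvit-base-patch32" checkpoint the tests above use;
# nested text queries are padded and flattened to
# (batch_size * num_max_text_queries, sequence_length) input ids:
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
inputs = processor(text=[["cat", "nasa badge"], ["person"]])
print(inputs["input_ids"].shape)  # (2 * 2, 16)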
364
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _lowerCAmelCase : Union[str, Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0} UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Any = image_size UpperCAmelCase__ : int = min_resolution UpperCAmelCase__ : Tuple = max_resolution UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : Optional[int] = do_normalize UpperCAmelCase__ : str = do_convert_rgb UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def __a ( self : str ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = PixaStructImageProcessingTester(self ) @property def __a ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ : Dict = 2_0_4_8 UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : 
Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase__ : Optional[int] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(snake_case__ ): UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches UpperCAmelCase__ : Optional[Any] = "Hello" UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Dict ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Dict = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : List[str] = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Optional[int] ): 
'''simple docstring''' # Initialize image_processor UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase__ : Optional[int] = 3 @property def __a ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : int ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
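# The `expected_hidden_dim` computed throughout these tests follows from
# Pix2Struct flattening each patch to patch_h * patch_w * channels values and
# prepending the patch's (row, col) coordinates, hence the "+ 2":
patch_h, patch_w, channels = 16, 16, 3
expected_hidden_dim = patch_h * patch_w * channels + 2
print(expected_hidden_dim)  # 770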
298
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase : str = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> YolosConfig: '''simple docstring''' UpperCAmelCase__ : List[Any] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: UpperCAmelCase__ : Any = 192 UpperCAmelCase__ : str = 768 UpperCAmelCase__ : List[str] = 12 UpperCAmelCase__ : int = 3 UpperCAmelCase__ : List[Any] = [800, 1333] UpperCAmelCase__ : List[str] = False elif yolos_name == "yolos_s_dWr": UpperCAmelCase__ : int = 330 UpperCAmelCase__ : List[Any] = 14 UpperCAmelCase__ : Optional[int] = 6 UpperCAmelCase__ : Optional[int] = 1320 elif "yolos_s" in yolos_name: UpperCAmelCase__ : Any = 384 UpperCAmelCase__ : List[Any] = 1536 UpperCAmelCase__ : List[str] = 12 UpperCAmelCase__ : Tuple = 6 elif "yolos_b" in yolos_name: UpperCAmelCase__ : Tuple = [800, 1344] UpperCAmelCase__ : Optional[Any] = 91 UpperCAmelCase__ : str = "huggingface/label-files" UpperCAmelCase__ : Optional[int] = "coco-detection-id2label.json" UpperCAmelCase__ : Optional[int] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) ) UpperCAmelCase__ : List[str] = {int(snake_case ): v for k, v in idalabel.items()} UpperCAmelCase__ : Union[str, Any] = idalabel UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE__ ( snake_case : dict , snake_case : YolosConfig , snake_case : bool = False )-> Optional[int]: '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase__ : List[str] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) UpperCAmelCase__ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :] UpperCAmelCase__ : Optional[int] = in_proj_bias[: config.hidden_size] UpperCAmelCase__ : Dict = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase__ : Union[str, Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase__ : Optional[int] = in_proj_weight[-config.hidden_size :, :] UpperCAmelCase__ : str = in_proj_bias[-config.hidden_size :] def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' if "backbone" in name: UpperCAmelCase__ : str = name.replace("backbone" , "vit" ) if "cls_token" in name: UpperCAmelCase__ : Dict = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: UpperCAmelCase__ : Optional[int] = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: UpperCAmelCase__ : Dict = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: UpperCAmelCase__ : Optional[Any] = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: UpperCAmelCase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: UpperCAmelCase__ : Tuple = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: UpperCAmelCase__ : Tuple = name.replace("attn.proj" , 
"attention.output.dense" ) if "attn" in name: UpperCAmelCase__ : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase__ : Any = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCAmelCase__ : List[Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase__ : List[Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase__ : Dict = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: UpperCAmelCase__ : Dict = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: UpperCAmelCase__ : str = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: UpperCAmelCase__ : Any = name.replace("vit.norm" , "vit.layernorm" ) return name def SCREAMING_SNAKE_CASE__ ( snake_case : dict , snake_case : YolosForObjectDetection )-> dict: '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase__ : List[str] = orig_state_dict.pop(snake_case ) if "qkv" in key: UpperCAmelCase__ : Union[str, Any] = key.split("." ) UpperCAmelCase__ : int = int(key_split[2] ) UpperCAmelCase__ : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: UpperCAmelCase__ : Optional[Any] = val[:dim, :] UpperCAmelCase__ : Union[str, Any] = val[ dim : dim * 2, : ] UpperCAmelCase__ : int = val[-dim:, :] else: UpperCAmelCase__ : int = val[:dim] UpperCAmelCase__ : int = val[dim : dim * 2] UpperCAmelCase__ : Union[str, Any] = val[-dim:] else: UpperCAmelCase__ : List[str] = val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( )-> torch.Tensor: '''simple docstring''' UpperCAmelCase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case , stream=snake_case ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str , snake_case : str , snake_case : bool = False )-> int: '''simple docstring''' UpperCAmelCase__ : Any = get_yolos_config(snake_case ) # load original state_dict UpperCAmelCase__ : List[str] = torch.load(snake_case , map_location="cpu" )["model"] # load 🤗 model UpperCAmelCase__ : Tuple = YolosForObjectDetection(snake_case ) model.eval() UpperCAmelCase__ : Tuple = convert_state_dict(snake_case , snake_case ) model.load_state_dict(snake_case ) # Check outputs on an image, prepared by YolosImageProcessor UpperCAmelCase__ : Tuple = 800 if yolos_name != "yolos_ti" else 512 UpperCAmelCase__ : Optional[int] = YolosImageProcessor(format="coco_detection" , size=snake_case ) UpperCAmelCase__ : Tuple = image_processor(images=prepare_img() , return_tensors="pt" ) UpperCAmelCase__ : Optional[int] = model(**snake_case ) UpperCAmelCase__ : Tuple = outputs.logits, outputs.pred_boxes UpperCAmelCase__ : Optional[int] = None, None if yolos_name == "yolos_ti": UpperCAmelCase__ : List[Any] = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": UpperCAmelCase__ : Optional[Any] = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": UpperCAmelCase__ : List[str] = 
torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) UpperCAmelCase__ : Any = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": UpperCAmelCase__ : Optional[int] = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) UpperCAmelCase__ : List[Any] = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) UpperCAmelCase__ : Tuple = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f'Unknown yolos_name: {yolos_name}' ) assert torch.allclose(logits[0, :3, :3] , snake_case , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , snake_case , atol=1E-4 ) Path(snake_case ).mkdir(exist_ok=snake_case ) print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case ) if push_to_hub: UpperCAmelCase__ : List[str] = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) UpperCAmelCase__ : Optional[Any] = model_mapping[yolos_name] image_processor.push_to_hub(snake_case , organization="hustvl" ) model.push_to_hub(snake_case , organization="hustvl" ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--yolos_name""", default="""yolos_s_200_pre""", type=str, help=( """Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',""" """ 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'.""" ), ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
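# --- Illustrative sketch of the fused-QKV split performed above ---
# timm-style checkpoints store attention as one (3 * hidden, hidden) matrix;
# the converter slices it into query/key/value thirds in that order:
import torch

hidden = 4  # toy size; real YOLOS hidden sizes are 192/330/384/768
in_proj_weight = torch.randn(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : hidden * 2, :]
v = in_proj_weight[-hidden:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)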
365
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : int = "mock-s3-bucket" UpperCAmelCase__ : Any = f's3://{mock_bucket}' UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case ) assert dataset_path.startswith("s3://" ) is False UpperCAmelCase__ : str = "./local/path" UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is True UpperCAmelCase__ : str = fsspec.filesystem("file" ) UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol] if input_path is None: UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case ) UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." 
)] assert fs.glob("*" ) == [expected_filename] with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} UpperCAmelCase__ : int = compressed_file_paths[protocol] UpperCAmelCase__ : Any = "dataset.jsonl" UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}' UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case ) UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(snake_case ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
298
0
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class lowerCAmelCase__ ( yaml.SafeLoader ): def __a ( self : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = [self.constructed_objects[key_node] for key_node, _ in node.value] UpperCAmelCase__ : Union[str, Any] = [tuple(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else key for key in keys] UpperCAmelCase__ : List[str] = Counter(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}' ) def __a ( self : int , snake_case__ : Optional[int] , snake_case__ : Tuple=False ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = super().construct_mapping(snake_case__ , deep=snake_case__ ) self._check_no_duplicates_on_constructed_node(snake_case__ ) return mapping def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> Tuple[Optional[str], str]: UpperCAmelCase__ : Union[str, Any] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: UpperCAmelCase__ : str = full_content[1:].index("---" ) + 1 UpperCAmelCase__ : Union[str, Any] = "\n".join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(snake_case ) class lowerCAmelCase__ ( __magic_name__ ): # class attributes SCREAMING_SNAKE_CASE_ ={'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def __a ( cls : Dict , snake_case__ : Path ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as readme_file: UpperCAmelCase__ : Union[str, Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(snake_case__ ) else: return cls() def __a ( self : Any , snake_case__ : Path ): '''simple docstring''' if path.exists(): with open(snake_case__ , encoding="utf-8" ) as readme_file: UpperCAmelCase__ : List[str] = readme_file.read() else: UpperCAmelCase__ : Dict = None UpperCAmelCase__ : Optional[Any] = self._to_readme(snake_case__ ) with open(snake_case__ , "w" , encoding="utf-8" ) as readme_file: readme_file.write(snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : Optional[str] = None ): '''simple docstring''' if readme_content is not None: UpperCAmelCase__ : Optional[Any] = _split_yaml_from_readme(snake_case__ ) UpperCAmelCase__ : int = "---\n" + self.to_yaml_string() + "---\n" + content else: UpperCAmelCase__ : Any = "---\n" + self.to_yaml_string() + "---\n" return full_content @classmethod def __a ( cls : Optional[Any] , snake_case__ : str ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = yaml.load(snake_case__ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields UpperCAmelCase__ : int = { (key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' return yaml.safe_dump( { (key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=snake_case__ , allow_unicode=snake_case__ , encoding="utf-8" , ).decode("utf-8" ) _lowerCAmelCase : Tuple = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": 
[], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser _lowerCAmelCase : List[str] = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") _lowerCAmelCase : Union[str, Any] = ap.parse_args() _lowerCAmelCase : Union[str, Any] = Path(args.readme_filepath) _lowerCAmelCase : Dict = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
366
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''megatron-bert''' def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Tuple = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Any = max_position_embeddings UpperCAmelCase__ : Dict = type_vocab_size UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : int = layer_norm_eps UpperCAmelCase__ : Optional[Any] = position_embedding_type UpperCAmelCase__ : Any = use_cache
298
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : Union[str, Any] , *snake_case__ : Any , **snake_case__ : Dict ): '''simple docstring''' warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , snake_case__ , ) super().__init__(*snake_case__ , **snake_case__ )
367
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Dict ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , ) def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )] def __a ( self : Any , snake_case__ : str , snake_case__ : str ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Any ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} ) ] def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )] def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )] class lowerCAmelCase__ ( __magic_name__ ): @require_beam def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : Dict ): '''simple docstring''' import apache_beam as beam UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock: UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , 
"default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Dict = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset
298
0
"""simple docstring""" from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata _lowerCAmelCase : int = """""" if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""): class lowerCAmelCase__ ( tr.AbstractTransform ): def __init__( self : Tuple , snake_case__ : str = " " ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = sentence_delimiter def __a ( self : Optional[Any] , snake_case__ : str ): '''simple docstring''' return list(snake_case__ ) def __a ( self : List[str] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = [] for sent_idx, sentence in enumerate(snake_case__ ): chars.extend(self.process_string(snake_case__ ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(snake_case__ ) - 1: chars.append(self.sentence_delimiter ) return chars _lowerCAmelCase : Optional[int] = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: _lowerCAmelCase : List[Any] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) _lowerCAmelCase : Dict = """\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } """ _lowerCAmelCase : Optional[int] = """\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information. Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. """ _lowerCAmelCase : Any = """ Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcribtions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result. 
Returns: (float): the character error rate Examples: >>> predictions = [\"this is the prediction\", \"there is an other sample\"] >>> references = [\"this is the reference\", \"there is another one\"] >>> cer = datasets.load_metric(\"cer\") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates", ] , ) def __a ( self : Tuple , snake_case__ : str , snake_case__ : int , snake_case__ : str=False ): '''simple docstring''' if concatenate_texts: return jiwer.compute_measures( snake_case__ , snake_case__ , truth_transform=snake_case__ , hypothesis_transform=snake_case__ , )["wer"] UpperCAmelCase__ : int = 0 UpperCAmelCase__ : str = 0 for prediction, reference in zip(snake_case__ , snake_case__ ): UpperCAmelCase__ : Union[str, Any] = jiwer.compute_measures( snake_case__ , snake_case__ , truth_transform=snake_case__ , hypothesis_transform=snake_case__ , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
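# CER from raw counts, following the formula in the docstring above:
# CER = (S + D + I) / N with N = S + D + C. E.g. 2 substitutions, 1 deletion,
# and 1 insertion against a 10-character reference (so C = 7):
S, D, I, C = 2, 1, 1, 7
cer = (S + D + I) / (S + D + C)
print(cer)  # 0.4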
368
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =XLMTokenizer SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : Optional[int] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(snake_case__ ) ) def __a ( self : Union[str, Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = "lower newer" UpperCAmelCase__ : Optional[Any] = "lower newer" return input_text, output_text def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : List[Any] = "lower" UpperCAmelCase__ : Any = ["low", "er</w>"] UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"] UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
298
0
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    '''simple docstring'''
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from i to j if the randomly
    # generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    '''simple docstring'''
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()
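# Worked examples: with p >= 1 the fast path returns a complete graph, and in
# the undirected case every edge appears in both adjacency lists:
assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}
random.seed(1)
graph = random_graph(4, 0.5)
assert all(v in graph[u] for u in graph for v in graph[u])  # symmetry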
369
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ): '''simple docstring''' UpperCAmelCase__ : Any = "bilinear" UpperCAmelCase__ : Any = max_size UpperCAmelCase__ : Any = short_edge_length def __call__( self : Dict , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for img in imgs: UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ ) if h < w: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w else: UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size if max(snake_case__ , snake_case__ ) > self.max_size: UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = newh * scale UpperCAmelCase__ : int = neww * scale UpperCAmelCase__ : List[Any] = int(neww + 0.5 ) UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase__ : Any = Image.fromarray(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ ) else: UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase__ : Tuple = nn.functional.interpolate( snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 ) img_augs.append(snake_case__ ) return img_augs class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase__ : Any = cfg.INPUT.FORMAT UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY UpperCAmelCase__ : str = cfg.PAD_VALUE UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std def __a ( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images] UpperCAmelCase__ : int = [ nn.functional.pad( snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case__ , snake_case__ ) ] return torch.stack(snake_case__ ), torch.tensor(snake_case__ ) def __call__( self : str , snake_case__ : int , snake_case__ : int=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [images] if single_image: assert len(snake_case__ ) == 1 for i in range(len(snake_case__ ) ): if 
isinstance(images[i] , torch.Tensor ): images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase__ : Tuple = self.aug(snake_case__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images] # now pad them to do the following operations UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int: '''simple docstring''' assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size tensor[:, 0].clamp_(min=0 , max=snake_case ) tensor[:, 1].clamp_(min=0 , max=snake_case ) tensor[:, 2].clamp_(min=0 , max=snake_case ) tensor[:, 3].clamp_(min=0 , max=snake_case )
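# A self-contained sketch of the shortest-edge resize rule implemented by
# ResizeShortestEdge above: scale so the shorter side equals `size`, then
# shrink again if the longer side would exceed `max_size`. Pure Python, no
# torch; the function name is ours, not part of the original module.
def shortest_edge_resize_shape(h: int, w: int, size: int, max_size: int) -> tuple:
    scale = size * 1.0 / min(h, w)
    if h < w:
        newh, neww = size, scale * w
    else:
        newh, neww = scale * h, size
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)


assert shortest_edge_resize_shape(480, 640, 800, 1333) == (800, 1067)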
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
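# Illustration of the commit-hash extraction defined above: it pulls the
# `snapshots/<commit>/` component out of a resolved hub cache path. The path
# below is hypothetical.
import re

resolved = "~/.cache/huggingface/diffusers/models--foo--bar/snapshots/" + "a" * 40 + "/unet/config.json"
match = re.search(r"snapshots/([^/]+)/", resolved)
assert match is not None and match.groups()[0] == "a" * 40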
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
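# Behavior sketch of the variant naming used by the `_add_variant` helper
# above: the variant tag is inserted just before the file extension, and a
# `None` variant leaves the name untouched. The sketch function name is ours.
def add_variant_sketch(weights_name: str, variant=None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        weights_name = ".".join(splits[:-1] + [variant] + splits[-1:])
    return weights_name


assert add_variant_sketch("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant_sketch("diffusion_pytorch_model.bin") == "diffusion_pytorch_model.bin"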
"""simple docstring""" import qiskit def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts: '''simple docstring''' UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" ) UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = half_adder(1, 1) print(F"""Half Adder Output Qubit Counts: {counts}""")
"""simple docstring""" import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =1 @register_to_config def __init__( self : List[str] , snake_case__ : int = 1_0_0_0 , snake_case__ : Optional[Union[np.ndarray, List[float]]] = None ): '''simple docstring''' self.set_timesteps(snake_case__ ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Union[str, Any] = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. UpperCAmelCase__ : List[str] = 4 # running values UpperCAmelCase__ : List[str] = [] def __a ( self : Tuple , snake_case__ : int , snake_case__ : Union[str, torch.device] = None ): '''simple docstring''' UpperCAmelCase__ : int = num_inference_steps UpperCAmelCase__ : str = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] UpperCAmelCase__ : Any = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: UpperCAmelCase__ : Any = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: UpperCAmelCase__ : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2 UpperCAmelCase__ : Optional[int] = (1.0 - self.betas**2) ** 0.5 UpperCAmelCase__ : Dict = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] UpperCAmelCase__ : List[Any] = timesteps.to(snake_case__ ) UpperCAmelCase__ : Optional[Any] = [] def __a ( self : Union[str, Any] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : bool = True , ): '''simple docstring''' if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) UpperCAmelCase__ : List[Any] = (self.timesteps == timestep).nonzero().item() UpperCAmelCase__ : str = timestep_index + 1 UpperCAmelCase__ : List[str] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(snake_case__ ) if len(self.ets ) == 1: UpperCAmelCase__ : str = self.ets[-1] elif len(self.ets ) == 2: UpperCAmelCase__ : int = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: UpperCAmelCase__ : Any = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2 else: UpperCAmelCase__ : Optional[int] = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4]) UpperCAmelCase__ : Optional[Any] = self._get_prev_sample(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=snake_case__ ) def __a ( self : List[str] , snake_case__ : torch.FloatTensor , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ): '''simple docstring''' return sample def __a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : str ): '''simple docstring''' UpperCAmelCase__ : Any = self.alphas[timestep_index] UpperCAmelCase__ : str = self.betas[timestep_index] UpperCAmelCase__ : Optional[int] = self.alphas[prev_timestep_index] UpperCAmelCase__ : Dict = self.betas[prev_timestep_index] UpperCAmelCase__ : Union[str, Any] = (sample - sigma * ets) / 
max(snake_case__ , 1e-8 ) UpperCAmelCase__ : List[str] = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self : List[Any] ): '''simple docstring''' return self.config.num_train_timesteps
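# Consistency check on the multistep coefficients used in `step` above: an
# order-k Adams-Bashforth rule must reproduce a constant history exactly, so
# each coefficient set has to sum to 1.
assert (3 - 1) / 2 == 1.0  # order 2
assert (23 - 16 + 5) / 12 == 1.0  # order 3
assert (55 - 59 + 37 - 9) / 24 == 1.0  # order 4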
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''efficientformer''' def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[str] = hidden_sizes UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Optional[int] = patch_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : Optional[int] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Dict = downsamples UpperCAmelCase__ : Any = dim UpperCAmelCase__ : str = key_dim UpperCAmelCase__ : List[Any] = attention_ratio UpperCAmelCase__ : Optional[Any] = resolution UpperCAmelCase__ : Optional[Any] = pool_size UpperCAmelCase__ : Any = downsample_patch_size UpperCAmelCase__ : int = downsample_stride UpperCAmelCase__ : Dict = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[str] = distillation UpperCAmelCase__ : Dict = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Optional[int] = batch_norm_eps
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple 
docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) 
model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] 
= 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = 
image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
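# Minimal standalone inference sketch mirroring what the slow image
# classification test above exercises (requires network access to download the
# checkpoint; the fixture path comes from the tests):
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # class index 281 per the test above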
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
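# Toy illustration of the metric computation performed by the script above
# (same `load_metric` API the script uses; one inserted word against a
# three-word reference gives WER 1/3):
from datasets import load_metric

wer_metric = load_metric("wer")
score = wer_metric.compute(references=["the cat sat"], predictions=["the cat sat down"])
assert abs(score - 1 / 3) < 1e-9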
"""simple docstring""" import math class lowerCAmelCase__ : def __init__( self : List[Any] , snake_case__ : Tuple=0 ): # a graph with Node 0,1,...,N-1 '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = n UpperCAmelCase__ : Optional[int] = [ [math.inf for j in range(0 , snake_case__ )] for i in range(0 , snake_case__ ) ] # adjacency matrix for weight UpperCAmelCase__ : Any = [ [math.inf for j in range(0 , snake_case__ )] for i in range(0 , snake_case__ ) ] # dp[i][j] stores minimum distance from i to j def __a ( self : Optional[Any] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : Any = w def __a ( self : Tuple ): '''simple docstring''' for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): UpperCAmelCase__ : int = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def __a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] ): '''simple docstring''' return self.dp[u][v] if __name__ == "__main__": _lowerCAmelCase : Dict = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False 
SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), 
BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] 
= torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] 
= model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
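# --- Added illustration (not from the original test file) ---
# A minimal, self-contained sketch of what
# post_process_semantic_segmentation(outputs, target_sizes=[(500, 300)]) is
# checking above, under the assumption that it upsamples the class logits to
# each requested size and takes a per-pixel argmax. All tensor values here are
# random and only the shapes matter.
import torch
import torch.nn.functional as F

logits = torch.randn(1, 150, 160, 160)  # (batch, num_labels, height, width), as asserted in the test
resized = F.interpolate(logits, size=(500, 300), mode="bilinear", align_corners=False)
segmentation_map = resized.argmax(dim=1)[0]  # one class id per pixel
assert segmentation_map.shape == (500, 300)  # matches the post-processed shape asserted above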
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _lowerCAmelCase : Any = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : List[Any] = b.T UpperCAmelCase__ : List[Any] = np.sum(np.square(snake_case ) , axis=1 ) UpperCAmelCase__ : Dict = np.sum(np.square(snake_case ) , axis=0 ) UpperCAmelCase__ : Optional[int] = np.matmul(snake_case , snake_case ) UpperCAmelCase__ : Dict = aa[:, None] - 2 * ab + ba[None, :] return d def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[Any] )-> Tuple: '''simple docstring''' UpperCAmelCase__ : str = x.reshape(-1 , 3 ) UpperCAmelCase__ : str = squared_euclidean_distance(snake_case , snake_case ) return np.argmin(snake_case , axis=1 ) class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =['''pixel_values'''] def __init__( self : Union[str, Any] , snake_case__ : Optional[Union[List[List[int]], np.ndarray]] = None , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = PILImageResampling.BILINEAR , snake_case__ : bool = True , snake_case__ : bool = True , **snake_case__ : List[Any] , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = size if size is not None else {"height": 2_5_6, "width": 2_5_6} UpperCAmelCase__ : Union[str, Any] = get_size_dict(snake_case__ ) UpperCAmelCase__ : Tuple = np.array(snake_case__ ) if clusters is not None else None UpperCAmelCase__ : Union[str, Any] = do_resize UpperCAmelCase__ : Optional[Any] = size UpperCAmelCase__ : Dict = resample UpperCAmelCase__ : Any = do_normalize UpperCAmelCase__ : List[Any] = do_color_quantize def __a ( self : Optional[int] , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : PILImageResampling = PILImageResampling.BILINEAR , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Tuple , ): '''simple docstring''' UpperCAmelCase__ : str = get_size_dict(snake_case__ ) if "height" not in size or "width" not in size: raise ValueError(f'Size dictionary must contain both height and width keys. 
Got {size.keys()}' ) return resize( snake_case__ , size=(size["height"], size["width"]) , resample=snake_case__ , data_format=snake_case__ , **snake_case__ ) def __a ( self : Tuple , snake_case__ : np.ndarray , snake_case__ : Optional[Union[str, ChannelDimension]] = None , ): '''simple docstring''' UpperCAmelCase__ : Dict = rescale(image=snake_case__ , scale=1 / 127.5 , data_format=snake_case__ ) UpperCAmelCase__ : List[str] = image - 1 return image def __a ( self : List[Any] , snake_case__ : ImageInput , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = None , snake_case__ : bool = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[Union[List[List[int]], np.ndarray]] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **snake_case__ : List[str] , ): '''simple docstring''' UpperCAmelCase__ : Any = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ : str = size if size is not None else self.size UpperCAmelCase__ : int = get_size_dict(snake_case__ ) UpperCAmelCase__ : int = resample if resample is not None else self.resample UpperCAmelCase__ : str = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ : Optional[int] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize UpperCAmelCase__ : Dict = clusters if clusters is not None else self.clusters UpperCAmelCase__ : List[str] = np.array(snake_case__ ) UpperCAmelCase__ : List[Any] = make_list_of_images(snake_case__ ) if not valid_images(snake_case__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True." ) # All transformations expect numpy arrays. UpperCAmelCase__ : str = [to_numpy_array(snake_case__ ) for image in images] if do_resize: UpperCAmelCase__ : Any = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images] if do_normalize: UpperCAmelCase__ : List[Any] = [self.normalize(image=snake_case__ ) for image in images] if do_color_quantize: UpperCAmelCase__ : Dict = [to_channel_dimension_format(snake_case__ , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) UpperCAmelCase__ : Optional[Any] = np.array(snake_case__ ) UpperCAmelCase__ : List[str] = color_quantize(snake_case__ , snake_case__ ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) UpperCAmelCase__ : Dict = images.shape[0] UpperCAmelCase__ : str = images.reshape(snake_case__ , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. UpperCAmelCase__ : Optional[Any] = list(snake_case__ ) else: UpperCAmelCase__ : Tuple = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images] UpperCAmelCase__ : List[Any] = {"input_ids": images} return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
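# --- Added illustration (not from the original file) ---
# A minimal, self-contained numpy sketch of the color quantization performed
# above: compute squared Euclidean distances between every pixel and every
# cluster color, then map each pixel to its nearest cluster index. The tiny
# `clusters` palette below is made up for the example.
import numpy as np

pixels = np.array([[0.9, 0.9, 0.9], [-1.0, -1.0, -1.0]])  # (n_pixels, 3), normalized to [-1, 1]
clusters = np.array([[1.0, 1.0, 1.0], [-1.0, -1.0, -1.0], [0.0, 0.0, 0.0]])  # (n_clusters, 3)

# d[i, j] = ||pixels[i] - clusters[j]||^2, expanded as a^2 - 2ab + b^2
d = (
    np.sum(np.square(pixels), axis=1)[:, None]
    - 2 * np.matmul(pixels, clusters.T)
    + np.sum(np.square(clusters), axis=1)[None, :]
)
input_ids = np.argmin(d, axis=1)
assert input_ids.tolist() == [0, 1]  # brightest pixel -> white cluster, darkest -> black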
"""simple docstring""" import functools def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int: '''simple docstring''' UpperCAmelCase__ : List[str] = len(snake_case ) UpperCAmelCase__ : str = len(snake_case ) @functools.cache def min_distance(snake_case : int , snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , snake_case ) , 1 + min_distance(snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Optional[Any] = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class lowerCAmelCase__ : @add_start_docstrings(snake_case__ ) def __call__( self : Any , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class lowerCAmelCase__ : @add_start_docstrings(snake_case__ ) def __call__( self : str , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class lowerCAmelCase__ ( __magic_name__ ): @add_start_docstrings(snake_case__ ) def __call__( self : List[Any] , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : int , **snake_case__ : List[Any] ): '''simple docstring''' for processor in self: UpperCAmelCase__ : Optional[Any] = inspect.signature(processor.__call__ ).parameters if len(snake_case__ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'Make sure that all the required parameters: {list(function_args.keys() )} for ' f'{processor.__class__} are passed to the logits processor.' 
) UpperCAmelCase__ : Dict = processor(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ) else: UpperCAmelCase__ : int = processor(snake_case__ , snake_case__ , snake_case__ ) return scores class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : List[Any] , snake_case__ : float ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or not (temperature > 0): raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}' ) UpperCAmelCase__ : str = temperature def __call__( self : Dict , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Dict = scores / self.temperature return scores class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : Union[str, Any] , snake_case__ : float , snake_case__ : float = -float("Inf" ) , snake_case__ : int = 1 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}' ) if not isinstance(snake_case__ , snake_case__ ) or (min_tokens_to_keep < 1): raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' ) UpperCAmelCase__ : Tuple = top_p UpperCAmelCase__ : int = filter_value UpperCAmelCase__ : Any = min_tokens_to_keep def __call__( self : List[Any] , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Any = lax.top_k(snake_case__ , scores.shape[-1] ) UpperCAmelCase__ : str = jnp.full_like(snake_case__ , self.filter_value ) UpperCAmelCase__ : List[str] = jax.nn.softmax(snake_case__ , axis=-1 ).cumsum(axis=-1 ) UpperCAmelCase__ : int = cumulative_probs < self.top_p # include the token that is higher than top_p as well UpperCAmelCase__ : str = jnp.roll(snake_case__ , 1 ) score_mask |= score_mask.at[:, 0].set(snake_case__ ) # min tokens to keep UpperCAmelCase__ : Dict = score_mask.at[:, : self.min_tokens_to_keep].set(snake_case__ ) UpperCAmelCase__ : int = jnp.where(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase__ : int = jax.lax.sort_key_val(snake_case__ , snake_case__ )[-1] return next_scores class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : Optional[Any] , snake_case__ : int , snake_case__ : float = -float("Inf" ) , snake_case__ : int = 1 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or top_k <= 0: raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}' ) UpperCAmelCase__ : List[str] = max(snake_case__ , snake_case__ ) UpperCAmelCase__ : str = filter_value def __call__( self : List[str] , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Any = scores.shape UpperCAmelCase__ : Union[str, Any] = jnp.full(batch_size * vocab_size , self.filter_value ) UpperCAmelCase__ : Optional[int] = min(self.top_k , scores.shape[-1] ) # Safety check UpperCAmelCase__ : Union[str, Any] = lax.top_k(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to((jnp.arange(snake_case__ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() UpperCAmelCase__ : str = topk_scores.flatten() UpperCAmelCase__ : Optional[int] = topk_indices.flatten() + shift UpperCAmelCase__ : Dict = next_scores_flat.at[topk_indices_flat].set(snake_case__ ) UpperCAmelCase__ : Tuple = next_scores_flat.reshape(snake_case__ , snake_case__ ) return next_scores class 
lowerCAmelCase__ ( __magic_name__ ): def __init__( self : Union[str, Any] , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Tuple = bos_token_id def __call__( self : Optional[int] , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : str = jnp.full(scores.shape , -float("inf" ) ) UpperCAmelCase__ : List[str] = 1 - jnp.bool_(cur_len - 1 ) UpperCAmelCase__ : Optional[int] = jnp.where(snake_case__ , new_scores.at[:, self.bos_token_id].set(0 ) , snake_case__ ) return scores class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : int , snake_case__ : int , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = max_length UpperCAmelCase__ : int = eos_token_id def __call__( self : Union[str, Any] , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Dict = jnp.full(scores.shape , -float("inf" ) ) UpperCAmelCase__ : Optional[int] = 1 - jnp.bool_(cur_len - self.max_length + 1 ) UpperCAmelCase__ : Dict = jnp.where(snake_case__ , new_scores.at[:, self.eos_token_id].set(0 ) , snake_case__ ) return scores class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : Optional[int] , snake_case__ : int , snake_case__ : int ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or min_length < 0: raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}' ) if not isinstance(snake_case__ , snake_case__ ) or eos_token_id < 0: raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}' ) UpperCAmelCase__ : Dict = min_length UpperCAmelCase__ : Optional[Any] = eos_token_id def __call__( self : Dict , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Any = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) UpperCAmelCase__ : List[str] = jnp.where(snake_case__ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , snake_case__ ) return scores class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : Tuple , snake_case__ : List[str] , snake_case__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = list(snake_case__ ) UpperCAmelCase__ : List[Any] = begin_index def __call__( self : Any , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : str = 1 - jnp.bool_(cur_len - self.begin_index ) UpperCAmelCase__ : Tuple = jnp.where(snake_case__ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , snake_case__ ) return scores class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : Optional[Any] , snake_case__ : list ): '''simple docstring''' UpperCAmelCase__ : Any = list(snake_case__ ) def __call__( self : Union[str, Any] , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Dict = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : List[Any] , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : str = dict(snake_case__ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
UpperCAmelCase__ : List[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: UpperCAmelCase__ : str = force_token_array.at[index].set(snake_case__ ) UpperCAmelCase__ : str = jnp.intaa(snake_case__ ) def __call__( self : Dict , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : int ): '''simple docstring''' def _force_token(snake_case__ : Union[str, Any] ): UpperCAmelCase__ : List[Any] = scores.shape[0] UpperCAmelCase__ : Union[str, Any] = self.force_token_array[generation_idx] UpperCAmelCase__ : Any = jnp.ones_like(snake_case__ , dtype=scores.dtype ) * -float("inf" ) UpperCAmelCase__ : str = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) UpperCAmelCase__ : Dict = lax.dynamic_update_slice(snake_case__ , snake_case__ , (0, current_token) ) return new_scores UpperCAmelCase__ : List[Any] = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(snake_case__ ) , lambda: scores , ) , ) return scores class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = generate_config.eos_token_id UpperCAmelCase__ : str = generate_config.no_timestamps_token_id UpperCAmelCase__ : int = generate_config.no_timestamps_token_id + 1 UpperCAmelCase__ : Union[str, Any] = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(snake_case__ , "max_initial_timestamp_index" ): UpperCAmelCase__ : Dict = generate_config.max_initial_timestamp_index else: UpperCAmelCase__ : int = model_config.vocab_size if self.max_initial_timestamp_index is None: UpperCAmelCase__ : Union[str, Any] = model_config.vocab_size def __call__( self : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ): UpperCAmelCase__ : Optional[int] = jnp.where((cur_len - self.begin_index) >= 1 , snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , snake_case__ , ) UpperCAmelCase__ : List[Any] = jnp.where((cur_len - self.begin_index) < 2 , snake_case__ , snake_case__ ) UpperCAmelCase__ : int = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , snake_case__ , snake_case__ , ) return jnp.where( snake_case__ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , snake_case__ , ) UpperCAmelCase__ : Tuple = jax.vmap(snake_case__ )(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[int] = jnp.where(cur_len == self.begin_index , snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , snake_case__ , ) UpperCAmelCase__ : str = self.timestamp_begin + self.max_initial_timestamp_index UpperCAmelCase__ : Optional[int] = jnp.where( snake_case__ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , snake_case__ , ) # if sum of probability over timestamps is above any 
other token, sample timestamp UpperCAmelCase__ : int = jax.nn.log_softmax(snake_case__ , axis=-1 ) def handle_cumulative_probs(snake_case__ : Dict , snake_case__ : List[Any] ): UpperCAmelCase__ : Optional[int] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) UpperCAmelCase__ : List[Any] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , snake_case__ , ) UpperCAmelCase__ : List[Any] = jax.vmap(snake_case__ )(snake_case__ , snake_case__ ) return scores
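# --- Added illustration (not from the original file) ---
# A minimal, self-contained sketch (all names hypothetical) of the top-k
# masking idea used by the warper above: keep the k largest logits per row by
# scattering them back into a tensor pre-filled with the filter value, so
# everything outside the top k becomes -inf before sampling.
import jax.numpy as jnp
from jax import lax

def sketch_top_k_filter(scores, k, filter_value=-float("inf")):
    topk_scores, topk_indices = lax.top_k(scores, k)  # both (batch, k)
    masked = jnp.full_like(scores, filter_value)      # start fully masked
    batch_idx = jnp.arange(scores.shape[0])[:, None]  # (batch, 1), broadcasts against (batch, k)
    return masked.at[batch_idx, topk_indices].set(topk_scores)

# sketch_top_k_filter(jnp.array([[1.0, 3.0, 2.0, 0.5]]), k=2)
# -> [[-inf, 3.0, 2.0, -inf]]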
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : List[Any] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) UpperCAmelCase__ : Tuple = input_file.read() UpperCAmelCase__ : Tuple = regexp.search(snake_case__ ) return match def __a ( self : List[str] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) UpperCAmelCase__ : Dict = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase__ : int = regexp.finditer(snake_case__ ) UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Path("./datasets" ) UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(snake_case__ ) ): raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = Path("./datasets" ) UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(snake_case__ ) ): raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
"""simple docstring""" from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter _lowerCAmelCase : str = logging.get_logger(__name__) _lowerCAmelCase : Dict[Optional[str], Type[Formatter]] = {} _lowerCAmelCase : Dict[Optional[str], str] = {} _lowerCAmelCase : Dict[Optional[str], Exception] = {} def SCREAMING_SNAKE_CASE__ ( snake_case : type , snake_case : Optional[str] , snake_case : Optional[List[str]] = None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' ) UpperCAmelCase__ : Optional[Any] = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' ) UpperCAmelCase__ : Optional[Any] = format_type def SCREAMING_SNAKE_CASE__ ( snake_case : Exception , snake_case : Optional[str] , snake_case : Optional[List[str]] = None )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): UpperCAmelCase__ : List[Any] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=["""python"""]) _register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""]) _register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""]) _register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""]) _register_formatter(CustomFormatter, """custom""") if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""]) else: _lowerCAmelCase : Dict = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""") _register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""]) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""]) else: _lowerCAmelCase : str = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""") _register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""]) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, """jax""", aliases=[]) else: _lowerCAmelCase : int = ValueError("""JAX needs to be installed to be able to return JAX arrays.""") _register_unavailable_formatter(_jax_error, """jax""", aliases=[]) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] )-> Optional[str]: '''simple docstring''' if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , **snake_case : List[str] )-> Formatter: '''simple docstring''' UpperCAmelCase__ : List[Any] = get_format_type_from_alias(snake_case ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**snake_case ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] 
else: raise ValueError( f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
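# --- Added illustration (not from the original file) ---
# A minimal, self-contained sketch of the registry pattern implemented above:
# formatter classes are registered under a canonical type plus aliases, and
# lookup first resolves the alias, then instantiates the class or raises the
# recorded "unavailable" error. Names below are simplified stand-ins.
from typing import Dict, Optional, Type

_TYPES: Dict[Optional[str], Type] = {}
_ALIASES: Dict[Optional[str], str] = {}
_UNAVAILABLE: Dict[Optional[str], Exception] = {}

def register(cls, format_type, aliases=()):
    _TYPES[format_type] = cls
    for alias in set(list(aliases) + [format_type]):
        _ALIASES[alias] = format_type

def get_formatter(format_type, **kwargs):
    format_type = _ALIASES.get(format_type, format_type)  # resolve alias first
    if format_type in _TYPES:
        return _TYPES[format_type](**kwargs)
    if format_type in _UNAVAILABLE:
        raise _UNAVAILABLE[format_type]
    raise ValueError(f"Unknown format type {format_type!r}")

class DummyFormatter:
    def __init__(self, **kwargs):
        self.kwargs = kwargs

register(DummyFormatter, "dummy", aliases=["d"])
assert isinstance(get_formatter("d"), DummyFormatter)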
"""simple docstring""" import numpy as np import datasets _lowerCAmelCase : Optional[int] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ _lowerCAmelCase : Tuple = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ _lowerCAmelCase : Optional[int] = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ): '''simple docstring''' # convert to numpy arrays UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ ) UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T ) try: UpperCAmelCase__ : str = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ ) UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ ) UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =StableDiffusionInstructPixaPixPipeline SCREAMING_SNAKE_CASE_ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} SCREAMING_SNAKE_CASE_ =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =IMAGE_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE_ =IMAGE_TO_IMAGE_IMAGE_PARAMS def __a ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : List[str] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , ) UpperCAmelCase__ : Dict = PNDMScheduler(skip_prk_steps=snake_case__ ) torch.manual_seed(0 ) UpperCAmelCase__ : Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCAmelCase__ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) UpperCAmelCase__ : Dict = CLIPTextModel(snake_case__ ) UpperCAmelCase__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase__ : List[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __a ( self : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]=0 ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) UpperCAmelCase__ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase__ : Any = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ) if str(snake_case__ ).startswith("mps" ): UpperCAmelCase__ : int = torch.manual_seed(snake_case__ ) else: UpperCAmelCase__ : Dict = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase__ : List[str] = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = "cpu" # ensure 
determinism for the device-dependent torch.Generator UpperCAmelCase__ : List[Any] = self.get_dummy_components() UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) UpperCAmelCase__ : Optional[int] = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase__ : Optional[Any] = self.get_dummy_inputs(snake_case__ ) UpperCAmelCase__ : str = sd_pipe(**snake_case__ ).images UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) UpperCAmelCase__ : Any = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : List[Any] = self.get_dummy_components() UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) UpperCAmelCase__ : str = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase__ : int = self.get_dummy_inputs(snake_case__ ) UpperCAmelCase__ : List[Any] = "french fries" UpperCAmelCase__ : Any = sd_pipe(**snake_case__ , negative_prompt=snake_case__ ) UpperCAmelCase__ : Union[str, Any] = output.images UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) UpperCAmelCase__ : Any = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : List[str] = self.get_dummy_components() UpperCAmelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) UpperCAmelCase__ : List[str] = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase__ : List[Any] = self.get_dummy_inputs(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = [inputs["prompt"]] * 2 UpperCAmelCase__ : Optional[Any] = np.array(inputs["image"] ).astype(np.floataa ) / 255.0 UpperCAmelCase__ : Union[str, Any] = torch.from_numpy(snake_case__ ).unsqueeze(0 ).to(snake_case__ ) UpperCAmelCase__ : Optional[Any] = image / 2 + 0.5 UpperCAmelCase__ : int = image.permute(0 , 3 , 1 , 2 ) UpperCAmelCase__ : Tuple = image.repeat(2 , 1 , 1 , 1 ) UpperCAmelCase__ : Any = sd_pipe(**snake_case__ ).images UpperCAmelCase__ : Optional[int] = image[-1, -3:, -3:, -1] assert image.shape == (2, 3_2, 3_2, 3) UpperCAmelCase__ : str = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : List[str] = self.get_dummy_components() UpperCAmelCase__ : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" ) UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) UpperCAmelCase__ : Union[str, Any] = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase__ : Optional[int] = self.get_dummy_inputs(snake_case__ ) UpperCAmelCase__ : Tuple = sd_pipe(**snake_case__ ).images UpperCAmelCase__ : Dict = image[0, 
-3:, -3:, -1] UpperCAmelCase__ : int = [round(snake_case__ , 4 ) for x in image_slice.flatten().tolist()] print(",".join([str(snake_case__ ) for x in slice] ) ) assert image.shape == (1, 3_2, 3_2, 3) UpperCAmelCase__ : int = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __a ( self : Dict ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.get_dummy_components() UpperCAmelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) UpperCAmelCase__ : List[str] = VaeImageProcessor(do_resize=snake_case__ , do_normalize=snake_case__ ) UpperCAmelCase__ : Dict = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase__ : List[str] = pipe(**self.get_dummy_inputs_by_type(snake_case__ , input_image_type="pt" ) )[0] UpperCAmelCase__ : Union[str, Any] = components["vae"] UpperCAmelCase__ : int = self.get_dummy_inputs_by_type(snake_case__ , input_image_type="pt" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): UpperCAmelCase__ : Dict = vae.encode(inputs[image_param] ).latent_dist.mode() UpperCAmelCase__ : str = pipe(**snake_case__ )[0] UpperCAmelCase__ : Optional[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(snake_case__ , 1e-4 , "passing latents as image input generate different result from passing image" ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Optional[Any] , snake_case__ : int=0 ): '''simple docstring''' UpperCAmelCase__ : str = torch.manual_seed(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) UpperCAmelCase__ : str = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() UpperCAmelCase__ : Optional[Any] = self.get_inputs() UpperCAmelCase__ : Tuple = pipe(**snake_case__ ).images UpperCAmelCase__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ : int = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=snake_case__ ) UpperCAmelCase__ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() UpperCAmelCase__ : Any = self.get_inputs() UpperCAmelCase__ : List[str] = pipe(**snake_case__ ).images UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ : 
Optional[int] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=snake_case__ ) UpperCAmelCase__ : int = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() UpperCAmelCase__ : Tuple = self.get_inputs() UpperCAmelCase__ : List[Any] = pipe(**snake_case__ ).images UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ : Any = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = 0 def callback_fn(snake_case__ : int , snake_case__ : int , snake_case__ : torch.FloatTensor ) -> None: UpperCAmelCase__ : Optional[int] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: UpperCAmelCase__ : Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) UpperCAmelCase__ : Any = latents[0, -3:, -3:, -1] UpperCAmelCase__ : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: UpperCAmelCase__ : Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) UpperCAmelCase__ : Optional[int] = latents[0, -3:, -3:, -1] UpperCAmelCase__ : Optional[Any] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 UpperCAmelCase__ : Union[str, Any] = False UpperCAmelCase__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=snake_case__ , torch_dtype=torch.floataa ) UpperCAmelCase__ : int = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() UpperCAmelCase__ : Tuple = self.get_inputs() pipe(**snake_case__ , callback=snake_case__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __a ( self : Dict ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=snake_case__ , torch_dtype=torch.floataa ) UpperCAmelCase__ : Tuple = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase__ : Optional[Any] = self.get_inputs() UpperCAmelCase__ : str = pipe(**snake_case__ ) UpperCAmelCase__ : str = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 1_0**9 def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 UpperCAmelCase__ : Dict = inputs["image"].resize((5_0_4, 5_0_4) ) UpperCAmelCase__ : Dict = "timbrooks/instruct-pix2pix" UpperCAmelCase__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( snake_case__ , safety_checker=snake_case__ , ) 
pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() UpperCAmelCase__ : Optional[int] = pipe(**snake_case__ ) UpperCAmelCase__ : Union[str, Any] = output.images[0] UpperCAmelCase__ : Union[str, Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert image.shape == (5_0_4, 5_0_4, 3) UpperCAmelCase__ : Optional[int] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =IFPipeline SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __a ( self : Dict ): '''simple docstring''' return self._get_dummy_components() def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): UpperCAmelCase__ : str = torch.manual_seed(snake_case__ ) else: UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase__ : Tuple = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self : Tuple ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self : Tuple ): '''simple docstring''' # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __a ( self : Dict ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __a ( self : int ): '''simple docstring''' self._test_save_load_local() def __a ( self : Any ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self : Optional[Any] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : str ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Tuple ): '''simple docstring''' # if UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : List[Any] = None pipe_a.enable_model_cpu_offload() 
pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : List[Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : str = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Tuple = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : str = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : 
Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Optional[Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : int = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Tuple = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' 
torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
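# A small usage note (assumes a CUDA device): the helper above clears the allocator
# and resets peak statistics so each pipeline stage can be measured in isolation.
# A typical bracket, as used in the tests above:
#
#   _start_torch_memory_measurement()
#   ...run one pipeline stage...
#   peak_bytes = torch.cuda.max_memory_allocated()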
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : int = { """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""", """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""", """kssteven/ibert-roberta-large-mnli""": ( """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''ibert''' def __init__( self : Any , snake_case__ : Dict=3_0_5_2_2 , snake_case__ : Any=7_6_8 , snake_case__ : Any=1_2 , snake_case__ : Tuple=1_2 , snake_case__ : Tuple=3_0_7_2 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Dict=5_1_2 , snake_case__ : int=2 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=1e-12 , snake_case__ : List[str]=1 , snake_case__ : str=0 , snake_case__ : Dict=2 , snake_case__ : Dict="absolute" , snake_case__ : List[Any]=False , snake_case__ : Dict="none" , **snake_case__ : List[str] , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : Union[str, Any] = vocab_size UpperCAmelCase__ : Tuple = hidden_size UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : Tuple = num_attention_heads UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : List[Any] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob UpperCAmelCase__ : Dict = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Optional[int] = type_vocab_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Tuple = layer_norm_eps UpperCAmelCase__ : List[Any] = position_embedding_type UpperCAmelCase__ : Any = quant_mode UpperCAmelCase__ : int = force_dequant class lowerCAmelCase__ ( __magic_name__ ): @property def __a ( self : Tuple ): '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase__ : str = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase__ : List[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } _lowerCAmelCase : List[Any] = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } _lowerCAmelCase : int = { """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = set() UpperCAmelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Dict = char UpperCAmelCase__ : Tuple = set(snake_case ) return pairs class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : Dict = vocab_file UpperCAmelCase__ : Tuple = merges_file UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Dict = 3 self.add_from_file(snake_case__ ) UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1] UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges] UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Dict = {} def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] UpperCAmelCase__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def __a ( self : Union[str, Any] , 
snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Tuple = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : List[str] ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : Dict , snake_case__ : Tuple ): '''simple docstring''' if token in self.cache: return self.cache[token] UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) UpperCAmelCase__ : Any = get_pairs(snake_case__ ) if not pairs: return token while True: UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : Tuple = 0 while i < len(snake_case__ ): try: UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : Dict = j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Dict = tuple(snake_case__ ) UpperCAmelCase__ : List[Any] = new_word if len(snake_case__ ) == 1: break else: UpperCAmelCase__ : Dict = get_pairs(snake_case__ ) UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ ) UpperCAmelCase__ : Optional[int] = word[:-4] UpperCAmelCase__ : Union[str, Any] = word return word def __a ( self : List[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : int = re.findall(R"\S+\n?" 
, snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def __a ( self : Dict , snake_case__ : List[str] ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def __a ( self : List[Any] , snake_case__ : Any ): '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def __a ( self : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : Tuple = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : str = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ): copyfile(self.merges_file , snake_case__ ) return out_vocab_file, out_merge_file def __a ( self : List[Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): try: with open(snake_case__ , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' ) return UpperCAmelCase__ : Dict = f.readlines() for lineTmp in lines: UpperCAmelCase__ : Optional[int] = lineTmp.strip() UpperCAmelCase__ : Tuple = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) UpperCAmelCase__ : Any = line[:idx] UpperCAmelCase__ : str = len(self.encoder )
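# A short usage sketch, assuming network access to the published vinai/phobert-base
# checkpoint; PhoBERT expects word-segmented Vietnamese input (underscored compounds).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base")
ids = tokenizer("Tôi là sinh_viên")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))  # <s> ... </s> wrap the BPE pieces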
"""simple docstring""" _lowerCAmelCase : Optional[Any] = 8.31_44_62 # Unit - J mol-1 K-1 def SCREAMING_SNAKE_CASE__ ( snake_case : float , snake_case : float , snake_case : float )-> float: '''simple docstring''' if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def SCREAMING_SNAKE_CASE__ ( snake_case : float , snake_case : float , snake_case : float )-> float: '''simple docstring''' if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 # setable values SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None @classmethod def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ ) @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ =42 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return True @register_to_config def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ): '''simple docstring''' UpperCAmelCase__ : Tuple = dtype def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ): '''simple docstring''' if common is None: UpperCAmelCase__ : Any = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype ) UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , ) def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ): '''simple docstring''' return sample def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=snake_case__ , timesteps=snake_case__ , ) def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: UpperCAmelCase__ : Union[str, 
Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) ) elif variance_type == "fixed_large": UpperCAmelCase__ : List[Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": UpperCAmelCase__ : List[str] = variance UpperCAmelCase__ : Optional[Any] = state.common.betas[t] UpperCAmelCase__ : Any = (predicted_variance + 1) / 2 UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if key is None: UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 ) else: UpperCAmelCase__ : int = None # 1. compute alphas, betas UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) UpperCAmelCase__ : List[str] = 1 - alpha_prod_t UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 ) UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ ) def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __len__( self : Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
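# A minimal denoising-loop sketch for the scheduler above. `unet_apply` is a
# hypothetical stand-in for a real Flax UNet forward pass; everything else uses
# only the API defined in this file.
import jax
import jax.numpy as jnp

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 3, 32, 32)) * state.init_noise_sigma
for t in state.timesteps:
    model_output = unet_apply(sample, t)  # hypothetical model call
    sample, state = scheduler.step(state, model_output, t, sample, key=key, return_dict=False)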
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs _lowerCAmelCase : Optional[int] = imread(r"""digital_image_processing/image_data/lena_small.jpg""") _lowerCAmelCase : Tuple = cvtColor(img, COLOR_BGR2GRAY) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' UpperCAmelCase__ : int = cn.convert_to_negative(snake_case ) # assert negative_img array for at least one True assert negative_img.any() def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img: # Work around assertion for response assert str(cc.change_contrast(snake_case , 110 ) ).startswith( "<PIL.Image.Image image mode=RGB size=100x100 at" ) def SCREAMING_SNAKE_CASE__ ( )-> List[str]: '''simple docstring''' UpperCAmelCase__ : Tuple = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' UpperCAmelCase__ : str = imread("digital_image_processing/image_data/lena_small.jpg" , 0 ) # assert ambiguous array for all == True assert canny_img.all() UpperCAmelCase__ : Tuple = canny.canny(snake_case ) # assert canny array for at least one True assert canny_array.any() def SCREAMING_SNAKE_CASE__ ( )-> Tuple: '''simple docstring''' assert gg.gaussian_filter(snake_case , 5 , sigma=0.9 ).all() def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' UpperCAmelCase__ : List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) UpperCAmelCase__ : int = conv.img_convolve(snake_case , snake_case ).astype(snake_case ) assert res.any() def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' assert med.median_filter(snake_case , 3 ).any() def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = sob.sobel_filter(snake_case ) assert grad.any() and theta.any() def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' UpperCAmelCase__ : List[Any] = sp.make_sepia(snake_case , 20 ) assert sepia.all() def SCREAMING_SNAKE_CASE__ ( snake_case : str = "digital_image_processing/image_data/lena_small.jpg" )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : str = bs.Burkes(imread(snake_case , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def SCREAMING_SNAKE_CASE__ ( snake_case : str = "digital_image_processing/image_data/lena_small.jpg" , )-> List[Any]: '''simple docstring''' UpperCAmelCase__ : int = rs.NearestNeighbour(imread(snake_case , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def SCREAMING_SNAKE_CASE__ ( )-> List[str]: '''simple docstring''' UpperCAmelCase__ : List[str] = "digital_image_processing/image_data/lena.jpg" # Reading the image and converting it to grayscale. 
UpperCAmelCase__ : Dict = imread(snake_case , 0 ) # Test for get_neighbors_pixel function() return not None UpperCAmelCase__ : str = 0 UpperCAmelCase__ : str = 0 UpperCAmelCase__ : Any = image[x_coordinate][y_coordinate] UpperCAmelCase__ : int = lbp.get_neighbors_pixel( snake_case , snake_case , snake_case , snake_case ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image UpperCAmelCase__ : Optional[int] = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): UpperCAmelCase__ : List[Any] = lbp.local_binary_value(snake_case , snake_case , snake_case ) assert lbp_image.any()
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCAmelCase__ : def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : Union[str, Any] = act_dim UpperCAmelCase__ : Dict = state_dim UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : List[str] = max_length UpperCAmelCase__ : int = is_training def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 ) UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase__ : Optional[int] = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __a ( self : int ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase__ : 
Optional[int] = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =() SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids SCREAMING_SNAKE_CASE_ =False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = DecisionTransformerModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : List[str] ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Tuple = [*signature.parameters.keys()] UpperCAmelCase__ : str = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Optional[int] = model.config torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset() UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ ) UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase__ : Union[str, Any] = state UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Any = torch.zeros(1 , 0 , 
device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 ) for step in range(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model( states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1] UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 ) UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase__ : Tuple = torch.cat( [timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ): '''simple docstring''' UpperCAmelCase__ : Any = "bilinear" UpperCAmelCase__ : Any = max_size UpperCAmelCase__ : Any = short_edge_length def __call__( self : Dict , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for img in imgs: UpperCAmelCase__ : int = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ ) if h < w: UpperCAmelCase__ : Optional[int] = size, scale * w else: UpperCAmelCase__ : int = scale * h, size if max(snake_case__ , snake_case__ ) > self.max_size: UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = newh * scale UpperCAmelCase__ : int = neww * scale UpperCAmelCase__ : List[Any] = int(neww + 0.5 ) UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase__ : Any = Image.fromarray(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ ) else: UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase__ : Tuple = nn.functional.interpolate( snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 ) img_augs.append(snake_case__ ) return img_augs class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase__ : Any = cfg.INPUT.FORMAT UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY UpperCAmelCase__ : str = cfg.PAD_VALUE UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std def __a ( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images] UpperCAmelCase__ : int = [ nn.functional.pad( snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case__ , snake_case__ ) ] return torch.stack(snake_case__ ), torch.tensor(snake_case__ ) def __call__( self : str , snake_case__ : int , snake_case__ : int=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [images] if single_image: assert len(snake_case__ ) == 1 for i in range(len(snake_case__ ) ): if isinstance(images[i] , torch.Tensor ): 
images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase__ : Tuple = self.aug(snake_case__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images] # now pad them to do the following operations UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int: '''simple docstring''' assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase__ : Dict = box_size tensor[:, 0].clamp_(min=0 , max=snake_case ) tensor[:, 1].clamp_(min=0 , max=snake_case ) tensor[:, 2].clamp_(min=0 , max=snake_case ) tensor[:, 3].clamp_(min=0 , max=snake_case )
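# A compositional sketch for the utilities above. `cfg` is a hypothetical config
# namespace exposing the attributes Preprocess reads (e.g. cfg.INPUT.MIN_SIZE_TEST);
# `boxes` stands in for model-predicted box coordinates in resized-image space.
#
#   preprocess = Preprocess(cfg)
#   images, sizes, scales_yx = preprocess(["photo.jpg"])
#   boxes = _scale_box(boxes, scales_yx)   # map boxes back to the raw image scale
#   _clip_box(boxes, (height, width))      # clamp coordinates to the image bounds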
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" from maths.prime_check import is_prime def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> int: '''simple docstring''' if not isinstance(snake_case , snake_case ): UpperCAmelCase__ : Optional[Any] = f'Input value of [number={number}] must be an integer' raise TypeError(snake_case ) if is_prime(snake_case ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
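# Quick behavioral checks for the `_add_variant` helper above: the variant tag is
# spliced in just before the file extension, and a `None` variant is a no-op.
assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert _add_variant("diffusion_pytorch_model.bin") == "diffusion_pytorch_model.bin"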
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =(PNDMScheduler,) SCREAMING_SNAKE_CASE_ =(('''num_inference_steps''', 50),) def __a ( self : Optional[int] , **snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = { "num_train_timesteps": 1_0_0_0, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**snake_case__ ) return config def __a ( self : int , snake_case__ : str=0 , **snake_case__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = dict(self.forward_default_kwargs ) UpperCAmelCase__ : Tuple = kwargs.pop("num_inference_steps" , snake_case__ ) UpperCAmelCase__ : List[str] = self.dummy_sample UpperCAmelCase__ : Dict = 0.1 * sample UpperCAmelCase__ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ : Optional[Any] = self.get_scheduler_config(**snake_case__ ) UpperCAmelCase__ : str = scheduler_class(**snake_case__ ) scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals UpperCAmelCase__ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case__ ) UpperCAmelCase__ : Tuple = scheduler_class.from_pretrained(snake_case__ ) new_scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals UpperCAmelCase__ : List[Any] = dummy_past_residuals[:] UpperCAmelCase__ : List[Any] = scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample UpperCAmelCase__ : Dict = new_scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" UpperCAmelCase__ : str = scheduler.step_plms(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample UpperCAmelCase__ : Optional[int] = new_scheduler.step_plms(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __a ( self : List[Any] ): '''simple docstring''' pass def __a ( self : Optional[int] , snake_case__ : int=0 , **snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = dict(self.forward_default_kwargs ) UpperCAmelCase__ : Dict = kwargs.pop("num_inference_steps" , snake_case__ ) UpperCAmelCase__ : Tuple = self.dummy_sample UpperCAmelCase__ : List[str] = 0.1 * sample UpperCAmelCase__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ : Optional[Any] = self.get_scheduler_config() UpperCAmelCase__ : List[Any] = scheduler_class(**snake_case__ ) scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase__ : Dict = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case__ ) UpperCAmelCase__ : int = scheduler_class.from_pretrained(snake_case__ ) # copy over dummy past residuals new_scheduler.set_timesteps(snake_case__ ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase__ : Tuple = dummy_past_residuals[:] UpperCAmelCase__ : str = scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ , **snake_case__ 
).prev_sample UpperCAmelCase__ : Any = new_scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" UpperCAmelCase__ : int = scheduler.step_plms(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample UpperCAmelCase__ : Optional[int] = new_scheduler.step_plms(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __a ( self : Dict , **snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.scheduler_classes[0] UpperCAmelCase__ : int = self.get_scheduler_config(**snake_case__ ) UpperCAmelCase__ : Tuple = scheduler_class(**snake_case__ ) UpperCAmelCase__ : str = 1_0 UpperCAmelCase__ : int = self.dummy_model() UpperCAmelCase__ : Optional[Any] = self.dummy_sample_deter scheduler.set_timesteps(snake_case__ ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase__ : List[Any] = model(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[int] = scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase__ : Any = model(snake_case__ , snake_case__ ) UpperCAmelCase__ : Any = scheduler.step_plms(snake_case__ , snake_case__ , snake_case__ ).prev_sample return sample def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase__ : List[str] = kwargs.pop("num_inference_steps" , snake_case__ ) for scheduler_class in self.scheduler_classes: UpperCAmelCase__ : List[Any] = self.get_scheduler_config() UpperCAmelCase__ : List[str] = scheduler_class(**snake_case__ ) UpperCAmelCase__ : Optional[Any] = self.dummy_sample UpperCAmelCase__ : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(snake_case__ , "set_timesteps" ): scheduler.set_timesteps(snake_case__ ) elif num_inference_steps is not None and not hasattr(snake_case__ , "set_timesteps" ): UpperCAmelCase__ : Any = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase__ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase__ : Any = dummy_past_residuals[:] UpperCAmelCase__ : Tuple = scheduler.step_prk(snake_case__ , 0 , snake_case__ , **snake_case__ ).prev_sample UpperCAmelCase__ : List[Any] = scheduler.step_prk(snake_case__ , 1 , snake_case__ , **snake_case__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase__ : Tuple = scheduler.step_plms(snake_case__ , 0 , snake_case__ , **snake_case__ ).prev_sample UpperCAmelCase__ : List[str] = scheduler.step_plms(snake_case__ , 1 , snake_case__ , **snake_case__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __a ( self : Tuple ): '''simple docstring''' for timesteps in [1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=snake_case__ ) UpperCAmelCase__ : int = self.scheduler_classes[0] UpperCAmelCase__ : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase__ : Tuple = scheduler_class(**snake_case__ ) 
scheduler.set_timesteps(1_0 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , ) def __a ( self : str ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ ) def __a ( self : Optional[int] ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=snake_case__ ) def __a ( self : str ): '''simple docstring''' for t in [1, 5, 1_0]: self.check_over_forward(time_step=snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ): self.check_over_forward(num_inference_steps=snake_case__ ) def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 2_7 for scheduler_class in self.scheduler_classes: UpperCAmelCase__ : Optional[int] = self.dummy_sample UpperCAmelCase__ : List[Any] = 0.1 * sample UpperCAmelCase__ : List[Any] = self.get_scheduler_config() UpperCAmelCase__ : str = scheduler_class(**snake_case__ ) scheduler.set_timesteps(snake_case__ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase__ : Dict = scheduler.step_prk(snake_case__ , snake_case__ , snake_case__ ).prev_sample def __a ( self : Any ): '''simple docstring''' with self.assertRaises(snake_case__ ): UpperCAmelCase__ : Dict = self.scheduler_classes[0] UpperCAmelCase__ : Union[str, Any] = self.get_scheduler_config() UpperCAmelCase__ : List[str] = scheduler_class(**snake_case__ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.full_loop() UpperCAmelCase__ : Optional[int] = torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase__ : List[Any] = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase__ : str = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 ) UpperCAmelCase__ : Tuple = torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase__ : int = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.full_loop(set_alpha_to_one=snake_case__ , beta_start=0.01 ) UpperCAmelCase__ : Tuple = torch.sum(torch.abs(snake_case__ ) ) UpperCAmelCase__ : Tuple = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
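# A hedged usage sketch (assumes the `diffusers` package; the tensors are
# random stand-ins, not a real model): `scheduler.step` dispatches to
# `step_prk` during the Runge-Kutta warm-up timesteps and to `step_plms`
# afterwards, which is why the test above exercises both entry points.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)  # stand-in for a noisy latent
for t in scheduler.timesteps:
    residual = torch.randn_like(sample)  # stand-in for the model's noise prediction
    sample = scheduler.step(residual, t, sample).prev_sample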
361
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy() UpperCAmelCase__ : List[Any] = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
298
0
"""simple docstring""" from statistics import mean, stdev def SCREAMING_SNAKE_CASE__ ( snake_case : list , snake_case : int = 3 )-> list: '''simple docstring''' UpperCAmelCase__ : Tuple = min(snake_case ) UpperCAmelCase__ : Optional[int] = max(snake_case ) # normalize data return [round((x - x_min) / (x_max - x_min) , snake_case ) for x in data] def SCREAMING_SNAKE_CASE__ ( snake_case : list , snake_case : int = 3 )-> list: '''simple docstring''' UpperCAmelCase__ : Dict = mean(snake_case ) UpperCAmelCase__ : Tuple = stdev(snake_case ) # standardize data return [round((x - mu) / (sigma) , snake_case ) for x in data]
362
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : int = seq_length UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : Union[str, Any] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Any = embedding_size UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : List[Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : Union[str, Any] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Tuple = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : Union[str, Any] = scope def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Optional[int] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : int = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = AlbertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self 
: Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_labels UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = self.num_labels UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_choices UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Tuple = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =True def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ): '''simple docstring''' UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , 
snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Dict = type self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" ) UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case__ ) UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
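# A hedged usage sketch mirroring the slow integration test above: load the
# public albert-base-v2 checkpoint and check the last-hidden-state shape for
# the same 11-token input (requires network access or a local model cache).
import torch
from transformers import AlbertModel

model = AlbertModel.from_pretrained("albert-base-v2")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)

with torch.no_grad():
    output = model(input_ids, attention_mask=attention_mask)[0]

print(output.shape)  # torch.Size([1, 11, 768])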
298
0
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> bool: '''simple docstring''' UpperCAmelCase__ : Tuple = 0 for ch in input_str: UpperCAmelCase__ : int = ord(snake_case ) UpperCAmelCase__ : Union[str, Any] = pow(2 , snake_case ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
363
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = list(range(snake_case ) ) # Find permutation while factorials: UpperCAmelCase__ : str = factorials.pop() UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
298
0