code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import os import pytest from attr import dataclass a : Tuple = """us-east-1""" # defaults region @dataclass class UpperCamelCase_ : lowercase = 42 lowercase = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' lowercase = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 500, 'save_steps': 5_500, } lowercase = {**hyperparameters, 'max_steps': 1_000} @property def _lowercase( self ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def _lowercase( self ) -> str: return f'''{self.framework}-transfromers-test''' @property def _lowercase( self ) -> str: return f'''./tests/sagemaker/scripts/{self.framework}''' @property def _lowercase( self ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="""class""" ) def __lowerCamelCase ( _lowercase ) -> Tuple: UpperCAmelCase : Dict = SageMakerTestEnvironment(framework=request.cls.framework )
265
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Optional[int] = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
265
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() lowercase__ : Optional[int] = logging.get_logger(__name__) def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : int=False ) -> int: __A : Optional[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'vit.embeddings.cls_token'), ('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 'vit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler 
rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __A : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def _lowerCAmelCase ( __snake_case : Tuple , __snake_case : int , __snake_case : List[str]=False ) -> List[Any]: for i in range(config.num_hidden_layers ): if base_model: __A : int = '' else: __A : List[str] = 'vit.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __A : Dict = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) __A : Union[str, Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __A : Optional[int] = in_proj_weight[ : config.hidden_size, : ] __A : Any = in_proj_bias[: config.hidden_size] __A : Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __A : Union[str, Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __A : Dict = in_proj_weight[ -config.hidden_size :, : ] __A : List[str] = in_proj_bias[-config.hidden_size :] def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> List[Any]: __A : Dict = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(__snake_case , __snake_case ) def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : int ) -> List[str]: __A : int = dct.pop(__snake_case ) __A : int = val def _lowerCAmelCase ( ) -> Tuple: __A : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg' __A : Union[str, Any] = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ) return im @torch.no_grad() def 
_lowerCAmelCase ( __snake_case : Dict , __snake_case : List[str] , __snake_case : List[Any]=True ) -> List[Any]: __A : Dict = ViTConfig() # patch_size if model_name[-1] == "8": __A : List[Any] = 8 # set labels if required if not base_model: __A : List[Any] = 10_00 __A : Any = 'huggingface/label-files' __A : List[str] = 'imagenet-1k-id2label.json' __A : List[Any] = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='dataset' ) , 'r' ) ) __A : List[str] = {int(__snake_case ): v for k, v in idalabel.items()} __A : Optional[int] = idalabel __A : int = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: __A : Dict = 3_84 __A : List[str] = 15_36 __A : int = 12 __A : Optional[Any] = 6 # load original model from torch hub __A : Dict = torch.hub.load('facebookresearch/dino:main' , __snake_case ) original_model.eval() # load state_dict of original model, remove and rename some keys __A : List[Any] = original_model.state_dict() if base_model: remove_classification_head_(__snake_case ) __A : Optional[Any] = create_rename_keys(__snake_case , base_model=__snake_case ) for src, dest in rename_keys: rename_key(__snake_case , __snake_case , __snake_case ) read_in_q_k_v(__snake_case , __snake_case , __snake_case ) # load HuggingFace model if base_model: __A : Any = ViTModel(__snake_case , add_pooling_layer=__snake_case ).eval() else: __A : int = ViTForImageClassification(__snake_case ).eval() model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by ViTImageProcessor __A : Optional[Any] = ViTImageProcessor() __A : Optional[Any] = image_processor(images=prepare_img() , return_tensors='pt' ) __A : List[Any] = encoding['pixel_values'] __A : Dict = model(__snake_case ) if base_model: __A : Optional[Any] = original_model(__snake_case ) assert torch.allclose(__snake_case , outputs.last_hidden_state[:, 0, :] , atol=1e-1 ) else: __A : int = original_model(__snake_case ) assert logits.shape == 
outputs.logits.shape assert torch.allclose(__snake_case , outputs.logits , atol=1e-3 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(__snake_case ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__snake_case ) if __name__ == "__main__": lowercase__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''dino_vitb16''', type=str, help='''Name of the model trained with DINO you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--base_model''', action='''store_true''', help='''Whether to only convert the base model (no projection head weights).''', ) parser.set_defaults(base_model=True) lowercase__ : Dict = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
354
'''simple docstring''' import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''): lowercase__ : Dict = { '''linear''': PIL.Image.Resampling.BILINEAR, '''bilinear''': PIL.Image.Resampling.BILINEAR, '''bicubic''': PIL.Image.Resampling.BICUBIC, '''lanczos''': PIL.Image.Resampling.LANCZOS, '''nearest''': PIL.Image.Resampling.NEAREST, } else: lowercase__ : Any = { '''linear''': PIL.Image.LINEAR, '''bilinear''': PIL.Image.BILINEAR, '''bicubic''': PIL.Image.BICUBIC, '''lanczos''': PIL.Image.LANCZOS, '''nearest''': PIL.Image.NEAREST, } def _lowerCAmelCase ( __snake_case : Any ) -> Optional[Any]: __A : Dict = (images / 2 + 0.5).clamp(0 , 1 ) __A : str = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __A : Dict = numpy_to_pil(__snake_case ) return images def _lowerCAmelCase ( __snake_case : List[Any] ) -> Optional[Any]: if images.ndim == 3: __A : List[Any] = images[None, ...] __A : List[str] = (images * 2_55).round().astype('uint8' ) if images.shape[-1] == 1: # special case for grayscale (single channel) images __A : str = [Image.fromarray(image.squeeze() , mode='L' ) for image in images] else: __A : str = [Image.fromarray(__snake_case ) for image in images] return pil_images
190
0
'''simple docstring''' def a__ ( a__ , a__ ): """simple docstring""" _enforce_args(a__ , a__ ) if n == 0: return 0 __SCREAMING_SNAKE_CASE = float("""-inf""" ) for i in range(1 , n + 1 ): __SCREAMING_SNAKE_CASE = max( a__ , prices[i - 1] + naive_cut_rod_recursive(n - i , a__ ) ) return max_revue def a__ ( a__ , a__ ): """simple docstring""" _enforce_args(a__ , a__ ) __SCREAMING_SNAKE_CASE = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(a__ , a__ , a__ ) def a__ ( a__ , a__ , a__ ): """simple docstring""" if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: __SCREAMING_SNAKE_CASE = float("""-inf""" ) for i in range(1 , n + 1 ): __SCREAMING_SNAKE_CASE = max( a__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , a__ , a__ ) , ) __SCREAMING_SNAKE_CASE = max_revenue return max_rev[n] def a__ ( a__ , a__ ): """simple docstring""" _enforce_args(a__ , a__ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. __SCREAMING_SNAKE_CASE = [float("""-inf""" ) for _ in range(n + 1 )] __SCREAMING_SNAKE_CASE = 0 for i in range(1 , n + 1 ): __SCREAMING_SNAKE_CASE = max_rev[i] for j in range(1 , i + 1 ): __SCREAMING_SNAKE_CASE = max(a__ , prices[j - 1] + max_rev[i - j] ) __SCREAMING_SNAKE_CASE = max_revenue_i return max_rev[n] def a__ ( a__ , a__ ): """simple docstring""" if n < 0: __SCREAMING_SNAKE_CASE = F'n must be greater than or equal to 0. Got n = {n}' raise ValueError(a__ ) if n > len(a__ ): __SCREAMING_SNAKE_CASE = ( """Each integral piece of rod must have a corresponding price. """ F'Got n = {n} but length of prices = {len(a__ )}' ) raise ValueError(a__ ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = [6, 10, 12, 15, 20, 23] __SCREAMING_SNAKE_CASE = len(a__ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. 
__SCREAMING_SNAKE_CASE = 36 __SCREAMING_SNAKE_CASE = top_down_cut_rod(a__ , a__ ) __SCREAMING_SNAKE_CASE = bottom_up_cut_rod(a__ , a__ ) __SCREAMING_SNAKE_CASE = naive_cut_rod_recursive(a__ , a__ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
267
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase : Optional[int] = logging.get_logger(__name__) UpperCAmelCase : str = { 'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json', } class lowerCAmelCase__ ( a , a ): """simple docstring""" lowerCAmelCase__ = "convnextv2" def __init__( self : Any , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : List[Any]=4 , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[int]="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : Dict=1E-12 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=224 , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_stages __SCREAMING_SNAKE_CASE = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes __SCREAMING_SNAKE_CASE = [3, 3, 9, 3] if depths is None else depths __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = drop_path_rate __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices( out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
267
1
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowercase__ =logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def __UpperCamelCase ( lowerCAmelCase__ : Any ): if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict ): return max(metric_fn(lowerCAmelCase__ , lowerCAmelCase__ ) for gt in ground_truths ) def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] ): __a : Any = [line.strip() for line in open(lowerCAmelCase__ , '''r''' ).readlines()] __a : Union[str, Any] = [] if args.gold_data_mode == "qa": __a : Any = pd.read_csv(lowerCAmelCase__ , sep='''\t''' , header=lowerCAmelCase__ ) for answer_list in data[1]: __a : List[str] = ast.literal_eval(lowerCAmelCase__ ) answers.append(lowerCAmelCase__ ) else: __a : List[str] = [line.strip() for line in open(lowerCAmelCase__ , '''r''' ).readlines()] __a : Optional[int] = [[reference] for reference in references] __a : Optional[int] = 0 for prediction, ground_truths in zip(lowerCAmelCase__ , lowerCAmelCase__ ): total += 1 em += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) fa += metric_max_over_ground_truths(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __a : Dict = 1_00.0 * em / total __a : Optional[Any] = 1_00.0 * fa / total logger.info(f"F1: 
{fa:.2f}" ) logger.info(f"EM: {em:.2f}" ) def __UpperCamelCase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict ): __a : Union[str, Any] = args.k __a : str = [line.strip() for line in open(lowerCAmelCase__ , '''r''' ).readlines()] __a : Optional[int] = [line.strip() for line in open(lowerCAmelCase__ , '''r''' ).readlines()] __a : Tuple = 0 for hypo, reference in zip(lowerCAmelCase__ , lowerCAmelCase__ ): __a : Tuple = set(hypo.split('''\t''' )[:k] ) __a : int = set(reference.split('''\t''' ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k __a : Tuple = 1_00.0 * em / total logger.info(f"Precision@{k}: {em: .2f}" ) def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : str ): def strip_title(lowerCAmelCase__ : str ): if title.startswith('''"''' ): __a : Optional[int] = title[1:] if title.endswith('''"''' ): __a : str = title[:-1] return title __a : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors='''pt''' , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , )['''input_ids'''].to(args.device ) __a : List[Any] = rag_model.rag.question_encoder(lowerCAmelCase__ ) __a : Union[str, Any] = question_enc_outputs[0] __a : str = rag_model.retriever( lowerCAmelCase__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , ) __a : str = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) __a : Tuple = [] for docs in all_docs: __a : List[Any] = [strip_title(lowerCAmelCase__ ) for title in docs['''title''']] provenance_strings.append('''\t'''.join(lowerCAmelCase__ ) ) return provenance_strings def __UpperCamelCase ( lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] ): with torch.no_grad(): __a : List[str] = 
rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase__ , return_tensors='''pt''' , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ ) __a : Optional[int] = inputs_dict.input_ids.to(args.device ) __a : int = inputs_dict.attention_mask.to(args.device ) __a : Any = rag_model.generate( # rag_model overwrites generate lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCAmelCase__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) __a : str = rag_model.retriever.generator_tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) if args.print_predictions: for q, a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): logger.info('''Q: {} - A: {}'''.format(lowerCAmelCase__ , lowerCAmelCase__ ) ) return answers def __UpperCamelCase ( ): __a : List[str] = argparse.ArgumentParser() parser.add_argument( '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=lowerCAmelCase__ , help=( '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the''' ''' model_name_or_path''' ) , ) parser.add_argument( '''--index_name''' , default=lowerCAmelCase__ , choices=['''exact''', '''compressed''', '''legacy'''] , type=lowerCAmelCase__ , help='''RAG model retriever type''' , ) parser.add_argument( '''--index_path''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''Path to the retrieval index''' , ) parser.add_argument('''--n_docs''' , default=5 , type=lowerCAmelCase__ , help='''Number of retrieved docs''' ) parser.add_argument( '''--model_name_or_path''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=lowerCAmelCase__ , help=( 
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates''' ''' precision@k.''' ) , ) parser.add_argument('''--k''' , default=1 , type=lowerCAmelCase__ , help='''k for the precision@k calculation''' ) parser.add_argument( '''--evaluation_set''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Path to a file containing evaluation samples''' , ) parser.add_argument( '''--gold_data_path''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Path to a tab-separated file with gold samples''' , ) parser.add_argument( '''--gold_data_mode''' , default='''qa''' , type=lowerCAmelCase__ , choices=['''qa''', '''ans'''] , help=( '''Format of the gold data file''' '''qa - a single line in the following format: question [tab] answer_list''' '''ans - a single line of the gold file contains the expected answer string''' ) , ) parser.add_argument( '''--predictions_path''' , type=lowerCAmelCase__ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , ) parser.add_argument( '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , ) parser.add_argument( '''--eval_batch_size''' , default=8 , type=lowerCAmelCase__ , help='''Batch size per GPU/CPU for evaluation.''' , ) parser.add_argument( '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , ) parser.add_argument( '''--num_beams''' , default=4 , type=lowerCAmelCase__ , help='''Number of beams to be used when generating answers''' , ) parser.add_argument('''--min_length''' , default=1 , type=lowerCAmelCase__ , help='''Min length of the generated answers''' ) parser.add_argument('''--max_length''' , default=5_0 , type=lowerCAmelCase__ , help='''Max length of the generated 
answers''' ) parser.add_argument( '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , ) parser.add_argument( '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , ) __a : Optional[Any] = parser.parse_args() __a : Optional[Any] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) return args def __UpperCamelCase ( lowerCAmelCase__ : Tuple ): __a : Dict = {} if args.model_type is None: __a : Any = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith('''rag''' ): __a : Any = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration __a : Dict = args.n_docs if args.index_name is not None: __a : str = args.index_name if args.index_path is not None: __a : int = args.index_path else: __a : int = BartForConditionalGeneration __a : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('''Evaluate the following checkpoints: %s''' , lowerCAmelCase__ ) __a : Any = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k __a : List[str] = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) ) score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) continue logger.info('''***** Running evaluation for {} *****'''.format(lowerCAmelCase__ ) ) logger.info(''' Batch size = %d''' , args.eval_batch_size ) logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) ) if args.model_type.startswith('''rag''' ): __a : Dict = RagRetriever.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) 
__a : Union[str, Any] = model_class.from_pretrained(lowerCAmelCase__ , retriever=lowerCAmelCase__ , **lowerCAmelCase__ ) model.retriever.init_retrieval() else: __a : Optional[Any] = model_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) model.to(args.device ) with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file: __a : Optional[Any] = [] for line in tqdm(lowerCAmelCase__ ): questions.append(line.strip() ) if len(lowerCAmelCase__ ) == args.eval_batch_size: __a : Dict = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write('''\n'''.join(lowerCAmelCase__ ) + '''\n''' ) preds_file.flush() __a : str = [] if len(lowerCAmelCase__ ) > 0: __a : List[Any] = evaluate_batch_fn(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) preds_file.write('''\n'''.join(lowerCAmelCase__ ) ) preds_file.flush() score_fn(lowerCAmelCase__ , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowercase__ =get_args() main(args)
90
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any ): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __a : Union[str, Any] = np.full((len(lowerCAmelCase__ ), sequence_length, 2) , lowerCAmelCase__ ) else: __a : str = np.full((len(lowerCAmelCase__ ), sequence_length) , lowerCAmelCase__ ) for i, tensor in enumerate(lowerCAmelCase__ ): if padding_side == "right": if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __a : Any = tensor[:sequence_length] else: __a : List[Any] = tensor[:sequence_length] else: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __a : Dict = tensor[:sequence_length] else: __a : int = tensor[:sequence_length] return out_tensor.tolist() def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] ): __a : str = ord(lowerCAmelCase__ ) if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6): return True __a : List[str] = unicodedata.category(lowerCAmelCase__ ) if cat.startswith('''P''' ): return True return False @dataclass class UpperCamelCase__ ( __lowercase ): _SCREAMING_SNAKE_CASE : PreTrainedTokenizerBase _SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = True _SCREAMING_SNAKE_CASE : Optional[int] = None _SCREAMING_SNAKE_CASE : Optional[int] = None _SCREAMING_SNAKE_CASE : int = -100 _SCREAMING_SNAKE_CASE : str = "pt" def lowerCAmelCase (self : str , snake_case_ : Tuple ): import torch __a : Union[str, Any] = '''label''' if '''label''' in features[0].keys() else '''labels''' __a : Tuple = [feature[label_name] for feature in features] if label_name in features[0].keys() else None __a : 
Union[str, Any] = self.tokenizer.pad( snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch __a : List[str] = torch.tensor(batch['''entity_ids'''] ).shape[1] __a : Tuple = self.tokenizer.padding_side if padding_side == "right": __a : Union[str, Any] = [ list(snake_case_ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case_ )) for label in labels ] else: __a : Dict = [ [self.label_pad_token_id] * (sequence_length - len(snake_case_ )) + list(snake_case_ ) for label in labels ] __a : Dict = [feature['''ner_tags'''] for feature in features] __a : Optional[Any] = padding_tensor(snake_case_ , -1 , snake_case_ , snake_case_ ) __a : Union[str, Any] = [feature['''original_entity_spans'''] for feature in features] __a : Optional[int] = padding_tensor(snake_case_ , (-1, -1) , snake_case_ , snake_case_ ) __a : List[str] = {k: torch.tensor(snake_case_ , dtype=torch.intaa ) for k, v in batch.items()} return batch
90
1
import collections
import importlib.util
import os
import re
from pathlib import Path


# Root of the transformers source tree walked by the checks below.
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the backend name(s) required by an `if not is_xxx_available()` line.

    Multiple backends are sorted and joined with "_and_"; returns None when the
    line is not a backend guard.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse a lazy `__init__.py` into its two halves.

    Returns a pair of dicts mapping backend name (or "none") to the list of
    objects declared in `_import_structure` and in the `TYPE_CHECKING` block
    respectively, or None when the init is not a lazy one.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of a lazy init and return a list of error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all lazy inits in the repo; raise ValueError listing inconsistent ones."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of transformers submodules (dotted names relative to the package)."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only top-level .py files are registered as submodules here.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Check every submodule is registered in the main `_import_structure`."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
244
"""simple docstring""" from typing import Any import numpy as np def lowercase_ ( __UpperCAmelCase ) -> bool: return np.array_equal(__UpperCAmelCase , matrix.conjugate().T ) def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any: lowerCAmelCase__ : Optional[int] = v.conjugate().T lowerCAmelCase__ : Optional[int] = v_star.dot(__UpperCAmelCase ) assert isinstance(__UpperCAmelCase , np.ndarray ) return (v_star_dot.dot(__UpperCAmelCase )) / (v_star.dot(__UpperCAmelCase )) def lowercase_ ( ) -> None: lowerCAmelCase__ : Union[str, Any] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] ) lowerCAmelCase__ : List[str] = np.array([[1], [2], [3]] ) assert is_hermitian(__UpperCAmelCase ), f"""{a} is not hermitian.""" print(rayleigh_quotient(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : Union[str, Any] = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__UpperCAmelCase ), f"""{a} is not hermitian.""" assert rayleigh_quotient(__UpperCAmelCase , __UpperCAmelCase ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
242
0
"""Convert suno-ai Bark checkpoints to the HF Transformers Bark format."""
import argparse
import os
from pathlib import Path

import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download

from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
    BarkCoarseConfig,
    BarkConfig,
    BarkFineConfig,
    BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
    BarkCoarseGenerationConfig,
    BarkFineGenerationConfig,
    BarkGenerationConfig,
    BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)  # seed used by the original Bark implementation


# Mapping from suno-ai layer names to the HF implementation's names.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of the checkpoint for `model_type` ("text"/"coarse"/"fine")."""
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    """Download `file_name` from the hub repo `from_hf_path` into the local cache dir."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)


def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load a suno checkpoint and convert it into the matching HF Bark sub-model.

    Downloads the checkpoint when missing, renames state-dict keys to the HF
    layout, validates key coverage, and returns the eval-mode model on `device`.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: older checkpoints store a single `vocab_size` field
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile wrapper prefix and remap layer names
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    # strict=False because the (non-persistent) attention bias buffers are excluded above
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model


def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model, verify it against the original implementation, and save it."""
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    """Assemble the three converted sub-models plus Encodec into a single BarkModel and save/push it."""
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
370
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}


class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's *tokenizers* library),
    based on byte-level Byte-Pair-Encoding.

    Args:
        vocab_file: Path to the vocabulary file.
        merges_file: Path to the merges file.
        tokenizer_file: Path to a serialized `tokenizers` file replacing the two above.
        unk_token / bos_token / eos_token: Special tokens (all default to "<|endoftext|>").
        add_prefix_space: Whether to add an initial space to the input, so the
            leading word is treated like any other word.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # Re-create the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        # Pretokenized input requires add_prefix_space so word boundaries encode correctly.
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Build input ids for a conversation (DialoGPT variants of models)."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        # Keep only the most recent context that fits the model.
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
297
0
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    """Task template for summarization datasets: maps dataset columns to "text"/"summary"."""

    # `task` is kept in asdict output even when it equals its default value.
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the configured dataset column names to the canonical "text"/"summary" names."""
        return {self.text_column: "text", self.summary_column: "summary"}
208
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    """
    Constructs an OwlViT processor which wraps an OwlViT image processor and a
    CLIP tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        """
        Prepare text queries, query images and/or images for the model.

        Returns a `BatchEncoding` with `input_ids`/`attention_mask` (for text),
        `query_pixel_values` (for query images) and `pixel_values` (for images).
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.'
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')

            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)

            else:
                raise ValueError('Target return tensor type could not be returned')

            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding['query_pixel_values'] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to OwlViTImageProcessor.post_process."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to OwlViTImageProcessor.post_process_object_detection."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to OwlViTImageProcessor.post_process_image_guided_detection."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
208
1
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class a_ ( unittest.TestCase , snake_case_ ): '''simple docstring''' def a__ (self ): '''simple docstring''' lowerCamelCase__ : List[str] = load_tool('text-classification' ) self.tool.setup() lowerCamelCase__ : Dict = load_tool('text-classification', remote=lowerCamelCase_ ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : Dict = self.tool('That\'s quite cool', ['positive', 'negative'] ) self.assertEqual(lowerCamelCase_, 'positive' ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : Tuple = self.remote_tool('That\'s quite cool', ['positive', 'negative'] ) self.assertEqual(lowerCamelCase_, 'positive' ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : int = self.tool(text='That\'s quite cool', labels=['positive', 'negative'] ) self.assertEqual(lowerCamelCase_, 'positive' ) def a__ (self ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.remote_tool(text='That\'s quite cool', labels=['positive', 'negative'] ) self.assertEqual(lowerCamelCase_, 'positive' )
316
"""simple docstring""" import numpy as np def lowerCamelCase_ ( _lowerCamelCase ): return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
316
1
'''simple docstring''' import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets SCREAMING_SNAKE_CASE_: List[str] ='\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n' SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n' SCREAMING_SNAKE_CASE_: Optional[Any] ='\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 
1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n' def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : List[str] ) -> str: '''simple docstring''' return float((preds == labels).mean() ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Dict ) -> int: '''simple docstring''' UpperCAmelCase_ = simple_accuracy(snake_case_ , snake_case_ ) UpperCAmelCase_ = float(fa_score(y_true=snake_case_ , y_pred=snake_case_ ) ) return { "accuracy": acc, "f1": fa, } def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : str ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = np.array(snake_case_ ) UpperCAmelCase_ = np.array(snake_case_ ) UpperCAmelCase_ = en_sentvecs.shape[0] # mean centering UpperCAmelCase_ = en_sentvecs - np.mean(snake_case_ , axis=0 ) UpperCAmelCase_ = in_sentvecs - np.mean(snake_case_ , axis=0 ) UpperCAmelCase_ = cdist(snake_case_ , snake_case_ , "cosine" ) UpperCAmelCase_ = np.array(range(snake_case_ ) ) UpperCAmelCase_ = sim.argsort(axis=1 )[:, :10] UpperCAmelCase_ = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): def _lowercase (self : List[str] ): if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" ) return datasets.MetricInfo( description=_DESCRIPTION , 
citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), "references": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , ) def _lowercase (self : Tuple , __a : Any , __a : Union[str, Any] ): if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(__a , __a )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(__a , __a ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(__a , __a )} else: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" )
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return x if y == 0 else greatest_common_divisor(snake_case_ , x % y ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return (x * y) // greatest_common_divisor(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int: '''simple docstring''' UpperCAmelCase_ = 1 for i in range(1 , n + 1 ): UpperCAmelCase_ = lcm(snake_case_ , snake_case_ ) return g if __name__ == "__main__": print(f"{solution() = }")
1
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """andreasmadsen/efficient_mlm_m0.40""": ( """https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json""" ), } class _snake_case ( lowercase__): UpperCamelCase__ : List[Any] ="""roberta-prelayernorm""" def __init__( self : Optional[int], __lowercase : List[Any]=5_0265, __lowercase : int=768, __lowercase : int=12, __lowercase : Optional[Any]=12, __lowercase : int=3072, __lowercase : str="gelu", __lowercase : Any=0.1, __lowercase : Optional[int]=0.1, __lowercase : int=512, __lowercase : List[Any]=2, __lowercase : Tuple=0.02, __lowercase : List[Any]=1e-1_2, __lowercase : str=1, __lowercase : int=0, __lowercase : Optional[Any]=2, __lowercase : Union[str, Any]="absolute", __lowercase : Dict=True, __lowercase : Tuple=None, **__lowercase : Any, ): super().__init__(pad_token_id=__lowercase, bos_token_id=__lowercase, eos_token_id=__lowercase, **__lowercase ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache lowercase__ = classifier_dropout class _snake_case ( lowercase__): @property def A__ ( self : Optional[int] ): if self.task == "multiple-choice": lowercase__ = {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
355
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = int(SCREAMING_SNAKE_CASE_ ) if decimal in (0, 1): # Exit cases for the recursion return str(SCREAMING_SNAKE_CASE_ ) lowercase__ , lowercase__ = divmod(SCREAMING_SNAKE_CASE_ , 2 ) return binary_recursive(SCREAMING_SNAKE_CASE_ ) + str(SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = str(SCREAMING_SNAKE_CASE_ ).strip() if not number: raise ValueError("No input value was provided" ) lowercase__ = "-" if number.startswith("-" ) else "" lowercase__ = number.lstrip("-" ) if not number.isnumeric(): raise ValueError("Input value is not an integer" ) return f'''{negative}0b{binary_recursive(int(SCREAMING_SNAKE_CASE_ ) )}''' if __name__ == "__main__": from doctest import testmod testmod()
224
0
from ..utils import DummyObject, requires_backends class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = ["flax", "transformers"] def __init__( self : Optional[int] , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->Any: requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : List[str] , *_UpperCamelCase : int , **_UpperCamelCase : str ) ->Any: requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : Union[str, Any] , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Any ) ->Dict: requires_backends(cls , ['''flax''', '''transformers'''] ) class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = ["flax", "transformers"] def __init__( self : Any , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Optional[int] ) ->Dict: requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : str , *_UpperCamelCase : List[Any] , **_UpperCamelCase : int ) ->str: requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : str , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Dict ) ->Union[str, Any]: requires_backends(cls , ['''flax''', '''transformers'''] ) class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = ["flax", "transformers"] def __init__( self : Union[str, Any] , *_UpperCamelCase : Dict , **_UpperCamelCase : Tuple ) ->List[str]: requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : Optional[int] , *_UpperCamelCase : str , **_UpperCamelCase : str ) ->Union[str, Any]: requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : Any , *_UpperCamelCase : Any , **_UpperCamelCase : List[str] ) ->int: requires_backends(cls , ['''flax''', '''transformers'''] ) class snake_case_ ( metaclass=__A ): '''simple docstring''' 
SCREAMING_SNAKE_CASE : Optional[Any] = ["flax", "transformers"] def __init__( self : Tuple , *_UpperCamelCase : str , **_UpperCamelCase : int ) ->Any: requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : List[Any] , *_UpperCamelCase : List[str] , **_UpperCamelCase : Union[str, Any] ) ->List[Any]: requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def snake_case__( cls : Union[str, Any] , *_UpperCamelCase : str , **_UpperCamelCase : List[Any] ) ->List[Any]: requires_backends(cls , ['''flax''', '''transformers'''] )
8
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", 
"""vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = 
dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Any: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: snake_case_ = ViTConfig() snake_case_ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case_ = True snake_case_ = int(vit_name[-12:-10] ) snake_case_ = int(vit_name[-9:-6] ) else: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = int(vit_name[-6:-4] ) snake_case_ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 elif vit_name[9:].startswith("""small""" ): snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): snake_case_ = 768 snake_case_ = 2_304 snake_case_ = 8 snake_case_ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): snake_case_ = 1_024 snake_case_ = 4_096 snake_case_ = 24 snake_case_ = 16 elif vit_name[4:].startswith("""huge""" ): snake_case_ = 1_280 snake_case_ = 5_120 snake_case_ = 32 snake_case_ = 16 # load original model from timm snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = timm_model.state_dict() if 
base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case_ = DeiTImageProcessor(size=config.image_size ) else: snake_case_ = ViTImageProcessor(size=config.image_size ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1E-3 ) else: snake_case_ = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, 
help='Path to the output PyTorch model directory.' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
347
0
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput lowerCAmelCase__ = 8 def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=BITS ): """simple docstring""" UpperCamelCase = x.device UpperCamelCase = (x * 255).int().clamp(0 , 255 ) UpperCamelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE ) UpperCamelCase = rearrange(_SCREAMING_SNAKE_CASE , "d -> d 1 1" ) UpperCamelCase = rearrange(_SCREAMING_SNAKE_CASE , "b c h w -> b c 1 h w" ) UpperCamelCase = ((x & mask) != 0).float() UpperCamelCase = rearrange(_SCREAMING_SNAKE_CASE , "b c d h w -> b (c d) h w" ) UpperCamelCase = bits * 2 - 1 return bits def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=BITS ): """simple docstring""" UpperCamelCase = x.device UpperCamelCase = (x > 0).int() UpperCamelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_SCREAMING_SNAKE_CASE , dtype=torch.intaa ) UpperCamelCase = rearrange(_SCREAMING_SNAKE_CASE , "d -> d 1 1" ) UpperCamelCase = rearrange(_SCREAMING_SNAKE_CASE , "b (c d) h w -> b c d h w" , d=8 ) UpperCamelCase = reduce(x * mask , "b c d h w -> b c h w" , "sum" ) return (dec / 255).clamp(0.0 , 1.0 ) def a__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ): """simple docstring""" if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable 
name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) UpperCamelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas UpperCamelCase = self.alphas_cumprod[timestep] UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod UpperCamelCase = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" UpperCamelCase = self.bit_scale if self.config.clip_sample: UpperCamelCase = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) UpperCamelCase = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCamelCase = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide UpperCamelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCamelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCamelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 UpperCamelCase = model_output.device if torch.is_tensor(_SCREAMING_SNAKE_CASE ) else "cpu" UpperCamelCase = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self._get_variance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ** 0.5 * eta * noise UpperCamelCase = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) def a__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="epsilon" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , ): """simple docstring""" UpperCamelCase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: UpperCamelCase , UpperCamelCase = torch.split(_SCREAMING_SNAKE_CASE , sample.shape[1] , dim=1 ) else: UpperCamelCase = None # 1. compute alphas, betas UpperCamelCase = self.alphas_cumprod[t] UpperCamelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one UpperCamelCase = 1 - alpha_prod_t UpperCamelCase = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": UpperCamelCase = model_output else: raise ValueError(F"Unsupported prediction_type {prediction_type}." ) # 3. 
Clip "predicted x_0" UpperCamelCase = self.bit_scale if self.config.clip_sample: UpperCamelCase = torch.clamp(_SCREAMING_SNAKE_CASE , -scale , _SCREAMING_SNAKE_CASE ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t UpperCamelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise UpperCamelCase = 0 if t > 0: UpperCamelCase = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_SCREAMING_SNAKE_CASE ).to(model_output.device ) UpperCamelCase = (self._get_variance(_SCREAMING_SNAKE_CASE , predicted_variance=_SCREAMING_SNAKE_CASE ) ** 0.5) * noise UpperCamelCase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) class _lowerCamelCase ( _lowercase ): def __init__(self , __a , __a , __a = 1.0 , ) -> Tuple: super().__init__() UpperCamelCase = bit_scale UpperCamelCase = ( ddim_bit_scheduler_step if isinstance(__a , __a ) else ddpm_bit_scheduler_step ) self.register_modules(unet=__a , scheduler=__a ) @torch.no_grad() def __call__(self , __a = 2_56 , __a = 2_56 , __a = 50 , __a = None , __a = 1 , __a = "pil" , __a = True , **__a , ) -> Union[Tuple, ImagePipelineOutput]: UpperCamelCase = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=__a , ) UpperCamelCase = decimal_to_bits(__a ) * self.bit_scale UpperCamelCase = latents.to(self.device ) self.scheduler.set_timesteps(__a ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual UpperCamelCase = 
self.unet(__a , __a ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase = self.scheduler.step(__a , __a , __a ).prev_sample UpperCamelCase = bits_to_decimal(__a ) if output_type == "pil": UpperCamelCase = self.numpy_to_pil(__a ) if not return_dict: return (image,) return ImagePipelineOutput(images=__a )
367
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCamelCase : def __init__(self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1e-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , ) -> Any: UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = embed_dim UpperCamelCase = depths UpperCamelCase = num_heads UpperCamelCase = window_size UpperCamelCase = mlp_ratio UpperCamelCase = qkv_bias UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = drop_path_rate UpperCamelCase = hidden_act UpperCamelCase = use_absolute_embeddings UpperCamelCase = patch_norm UpperCamelCase = layer_norm_eps UpperCamelCase = initializer_range UpperCamelCase = is_training UpperCamelCase = scope UpperCamelCase = use_labels UpperCamelCase = type_sequence_label_size UpperCamelCase = encoder_stride def snake_case_ (self ) -> List[str]: UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, 
self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = self.get_config() return config, pixel_values, labels def snake_case_ (self ) -> Union[str, Any]: return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def snake_case_ (self , __a , __a , __a ) -> Dict: UpperCamelCase = SwinvaModel(config=__a ) model.to(__a ) model.eval() UpperCamelCase = model(__a ) UpperCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def snake_case_ (self , __a , __a , __a ) -> Any: UpperCamelCase = SwinvaForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() UpperCamelCase = model(__a ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = SwinvaForMaskedImageModeling(__a ) model.to(__a ) model.eval() UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def snake_case_ (self , __a , 
__a , __a ) -> int: UpperCamelCase = self.type_sequence_label_size UpperCamelCase = SwinvaForImageClassification(__a ) model.to(__a ) model.eval() UpperCamelCase = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case_ (self ) -> List[Any]: UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _lowerCamelCase ( _lowercase , _lowercase , unittest.TestCase ): UpperCAmelCase_ = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) UpperCAmelCase_ = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False def snake_case_ (self ) -> Union[str, Any]: UpperCamelCase = SwinvaModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=__a , embed_dim=37 ) def snake_case_ (self ) -> Tuple: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case_ (self ) -> List[Any]: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." 
) def snake_case_ (self ) -> Optional[int]: pass @unittest.skip(reason="Swinv2 does not use inputs_embeds" ) def snake_case_ (self ) -> Union[str, Any]: pass def snake_case_ (self ) -> Optional[int]: UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def snake_case_ (self ) -> Optional[int]: UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(__a ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def snake_case_ (self ) -> int: UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = True for model_class in self.all_model_classes: UpperCamelCase = True UpperCamelCase = False UpperCamelCase = True UpperCamelCase = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(__a , __a ) ) UpperCamelCase = outputs.attentions UpperCamelCase = len(self.model_tester.depths ) self.assertEqual(len(__a ) , __a ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCamelCase = True UpperCamelCase = config.window_size**2 UpperCamelCase = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(__a , __a ) ) UpperCamelCase = outputs.attentions self.assertEqual(len(__a ) , __a ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], 
window_size_squared, window_size_squared] , ) UpperCamelCase = len(__a ) # Check attention is always last and order is fine UpperCamelCase = True UpperCamelCase = True UpperCamelCase = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(__a , __a ) ) if hasattr(self.model_tester , "num_hidden_states_types" ): UpperCamelCase = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states UpperCamelCase = 2 self.assertEqual(out_len + added_hidden_states , len(__a ) ) UpperCamelCase = outputs.attentions self.assertEqual(len(__a ) , __a ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def snake_case_ (self , __a , __a , __a , __a ) -> int: UpperCamelCase = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(__a , __a ) ) UpperCamelCase = outputs.hidden_states UpperCamelCase = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__a ) , __a ) # Swinv2 has a different seq_length UpperCamelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) UpperCamelCase = outputs.reshaped_hidden_states self.assertEqual(len(__a ) , __a ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = reshaped_hidden_states[0].shape UpperCamelCase = ( reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def snake_case_ (self ) -> str: UpperCamelCase , UpperCamelCase 
= self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: UpperCamelCase = True self.check_hidden_states_output(__a , __a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True self.check_hidden_states_output(__a , __a , __a , __a ) def snake_case_ (self ) -> Tuple: UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCamelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: UpperCamelCase = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) def snake_case_ (self ) -> Union[str, Any]: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def snake_case_ (self ) -> Optional[Any]: UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def snake_case_ (self ) -> Tuple: for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = 
SwinvaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case_ (self ) -> List[Any]: UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = _config_zero_init(__a ) for model_class in self.all_model_classes: UpperCamelCase = model_class(config=__a ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) @require_vision @require_torch class _lowerCamelCase ( unittest.TestCase ): @cached_property def snake_case_ (self ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ) if is_vision_available() else None ) @slow def snake_case_ (self ) -> str: UpperCamelCase = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to( __a ) UpperCamelCase = self.default_image_processor UpperCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) UpperCamelCase = image_processor(images=__a , return_tensors="pt" ).to(__a ) # forward pass with torch.no_grad(): UpperCamelCase = model(**__a ) # verify the logits UpperCamelCase = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __a ) UpperCamelCase = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
244
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    """Configuration for a RoFormer model.

    Stores the hyper-parameters used to instantiate a RoFormer model; defaults
    mirror the `junnyu/roformer_chinese_base` checkpoint. The extra
    ``pad_token_id`` keyword is forwarded to ``PretrainedConfig``.
    """

    # consumed by the transformers auto-mapping machinery
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # embedding size defaults to the hidden size unless given explicitly
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoFormer."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice inputs carry an extra "choice" axis between batch and sequence
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
9
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    """Reader that loads line-oriented text files into a `Dataset`.

    Wraps the packaged ``Text`` builder; `read()` returns either a streaming
    or a map-style dataset depending on the ``streaming`` flag.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to a {split_name: files} mapping expected by the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        """Materialize and return the dataset for `self.split`."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
9
1
def binary_multiply(a: int, b: int) -> int:
    """Multiply ``a`` by ``b`` with the binary (Russian-peasant) method.

    Repeatedly doubles ``a`` and halves ``b``, adding the current ``a`` to the
    accumulator whenever the low bit of ``b`` is set. Requires ``b >= 0``.
    """
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Return ``(a * b) % c`` using binary multiplication.

    The accumulator is reduced modulo ``c`` at every addition so it never
    exceeds the modulus. Requires ``b >= 0`` and ``c > 0``.
    """
    res = 0
    while b > 0:
        if b & 1:
            # keep the running sum reduced mod c
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
261
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
    author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
    title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
    booktitle = {Proceedings of the Fifth Conference on Machine Translation},
    month = {November},
    year = {2020},
    address = {Online},
    publisher = {Association for Computational Linguistics},
    pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
    title = "{COMET}: A Neural Framework for {MT} Evaluation",
    author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
    pages = "2685--2702",
}
"""

_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.

See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.

Examples:

    >>> comet_metric = datasets.load_metric('comet')
    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    """Learned MT evaluation metric backed by the Unbabel COMET models."""

    def _info(self):
        # Metric metadata consumed by `datasets` (features define compute() inputs).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        # Download and cache the requested COMET checkpoint once per metric instance.
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        # Re-shape column-wise lists into one {src, mt, ref} dict per sample.
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        # NOTE(review): assumes COMET's predict() returns (segment_scores, system_score)
        # in this order for this comet version — confirm against the pinned release.
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
261
1
'''simple docstring''' from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake UpperCamelCase_ = numpy.array([0, 0]) UpperCamelCase_ = numpy.array([0.5, 0.8_6_6_0_2_5_4]) UpperCamelCase_ = numpy.array([1, 0]) UpperCamelCase_ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def _UpperCAmelCase ( _lowerCamelCase : list[numpy.ndarray] , _lowerCamelCase : int ) -> list[numpy.ndarray]: _lowerCAmelCase : Dict = initial_vectors for _ in range(_lowerCamelCase ): _lowerCAmelCase : Any = iteration_step(_lowerCamelCase ) return vectors def _UpperCAmelCase ( _lowerCamelCase : list[numpy.ndarray] ) -> list[numpy.ndarray]: _lowerCAmelCase : Union[str, Any] = [] for i, start_vector in enumerate(vectors[:-1] ): _lowerCAmelCase : Optional[int] = vectors[i + 1] new_vectors.append(_lowerCamelCase ) _lowerCAmelCase : Dict = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def _UpperCAmelCase ( _lowerCamelCase : numpy.ndarray , _lowerCamelCase : float ) -> numpy.ndarray: _lowerCAmelCase : int = numpy.radians(_lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase : Tuple = numpy.cos(_lowerCamelCase ), numpy.sin(_lowerCamelCase ) _lowerCAmelCase : List[Any] = numpy.array(((c, -s), (s, c)) ) return numpy.dot(_lowerCamelCase , _lowerCamelCase ) def _UpperCAmelCase ( _lowerCamelCase : list[numpy.ndarray] ) -> None: _lowerCAmelCase : List[Any] = plt.gca() axes.set_aspect("""equal""" ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() _lowerCAmelCase , _lowerCAmelCase : Tuple = zip(*_lowerCamelCase ) plt.plot(_lowerCamelCase , _lowerCamelCase ) plt.show() if __name__ == 
"__main__": import doctest doctest.testmod() UpperCamelCase_ = iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
309
"""Unit tests for the PyTorch ViTMSN model."""
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    """Builds tiny ViTMSN configs/inputs and shape-checks model outputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print("Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline test-suite run against ViTMSN."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the shared COCO cats fixture used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
1
"""Train a small binary-classification CNN on an image folder dataset."""
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights

    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    # the model expects a batch axis in front
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
48
"""Consolidate a question encoder and a generator checkpoint into one RAG checkpoint."""
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    """Build a RAG model from its two sub-models and save it (plus tokenizers) to `dest_dir`.

    Unspecified config/tokenizer identifiers fall back to sensible defaults
    derived from `model_type` and the sub-model identifiers.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
48
1
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, collecting every valid n-queens board.

    Args:
        possible_board: Column index of the queen already placed in each row
            (e.g. ``[1, 3, 0, 2]`` means row 0 has a queen in column 1, ...).
        diagonal_right_collisions: ``row - col`` values of placed queens (45º diagonals).
        diagonal_left_collisions: ``row + col`` values of placed queens (135º diagonals).
        boards: Output accumulator; each complete solution is appended as a list of
            printable row strings.
        n: Board size / number of queens.
    """
    # The next row to fill is simply how many queens have been placed so far.
    row = len(possible_board)

    # If row equals the board size, every row holds a queen: record the solution.
    if row == n:
        # Convert column indices like [1, 3, 0, 2] into printable rows like
        # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '].
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # Try each column of the current row for all possible placements.
    for col in range(n):
        # Reject a column already used (vertical collision) and both diagonals:
        # row - col identifies the 45º diagonal, row + col the 135º diagonal.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Recurse with the new queen and its diagonal signatures added.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Solve the n-queens puzzle for an ``n`` x ``n`` board and print every solution."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards, one blank line between solutions.
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
13
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 255 , _UpperCAmelCase=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowercase__: str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} lowercase__: Optional[Any] = parent lowercase__: List[Any] = batch_size lowercase__: Tuple = num_channels lowercase__: Optional[Any] = min_resolution lowercase__: Dict = max_resolution lowercase__: Optional[int] = do_resize lowercase__: Any = size lowercase__: Optional[Any] = do_normalize lowercase__: Union[str, Any] = image_mean lowercase__: Tuple = image_std lowercase__: str = do_rescale lowercase__: Any = rescale_factor lowercase__: List[Any] = do_pad def _snake_case ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False ): if not batched: lowercase__: Optional[Any] = image_inputs[0] if isinstance(_UpperCAmelCase , Image.Image ): lowercase__, lowercase__: Dict = 
image.size else: lowercase__, lowercase__: Optional[Any] = image.shape[1], image.shape[2] if w < h: lowercase__: List[str] = int(self.size['''shortest_edge'''] * h / w ) lowercase__: Union[str, Any] = self.size['''shortest_edge'''] elif w > h: lowercase__: int = self.size['''shortest_edge'''] lowercase__: int = int(self.size['''shortest_edge'''] * w / h ) else: lowercase__: Union[str, Any] = self.size['''shortest_edge'''] lowercase__: Union[str, Any] = self.size['''shortest_edge'''] else: lowercase__: Optional[int] = [] for image in image_inputs: lowercase__, lowercase__: int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase__: Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0] lowercase__: Dict = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Optional[int] = YolosImageProcessor if is_vision_available() else None def _snake_case ( self ): lowercase__: int = YolosImageProcessingTester(self ) @property def _snake_case ( self ): return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self ): lowercase__: List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) ) def _snake_case ( self ): lowercase__: Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , _UpperCAmelCase ) lowercase__: Any = 
self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , _UpperCAmelCase ) def _snake_case ( self ): pass def _snake_case ( self ): # Initialize image_processing lowercase__: Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input lowercase__: int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase__, lowercase__: Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__, lowercase__: Any = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) lowercase__: int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case ( self ): # Initialize image_processing lowercase__: List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) # Test not batched input lowercase__: List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase__, lowercase__: str = 
self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__: Dict = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values lowercase__, lowercase__: str = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case ( self ): # Initialize image_processing lowercase__: Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input lowercase__: Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase__, lowercase__: int = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__: List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values lowercase__, lowercase__: List[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case ( self ): # Initialize image_processings lowercase__: Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) lowercase__: Optional[Any] = self.image_processing_class(do_resize=_UpperCAmelCase , do_normalize=_UpperCAmelCase , 
do_rescale=_UpperCAmelCase ) # create random PyTorch tensors lowercase__: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors lowercase__: List[str] = image_processing_a.pad(_UpperCAmelCase , return_tensors='''pt''' ) lowercase__: Tuple = image_processing_a(_UpperCAmelCase , return_tensors='''pt''' ) self.assertTrue( torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) ) @slow def _snake_case ( self ): # prepare image and target lowercase__: Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: lowercase__: Any = json.loads(f.read() ) lowercase__: Dict = {'''image_id''': 39769, '''annotations''': target} # encode them lowercase__: Dict = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' ) lowercase__: Any = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='''pt''' ) # verify pixel values lowercase__: Optional[Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase ) lowercase__: Optional[Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) ) # verify area lowercase__: Tuple = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase ) ) # verify boxes lowercase__: str = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase ) lowercase__: List[Any] = torch.tensor([0.5_503, 
0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1e-3 ) ) # verify image_id lowercase__: Optional[int] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase ) ) # verify is_crowd lowercase__: Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase ) ) # verify class_labels lowercase__: Dict = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase ) ) # verify orig_size lowercase__: List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase ) ) # verify size lowercase__: List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase ) ) @slow def _snake_case ( self ): # prepare image, target and masks_path lowercase__: str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: lowercase__: str = json.loads(f.read() ) lowercase__: List[Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} lowercase__: Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them lowercase__: Union[str, Any] = YolosImageProcessor(format='''coco_panoptic''' ) lowercase__: Optional[Any] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='''pt''' ) # verify pixel values lowercase__: Optional[int] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase ) lowercase__: Dict = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) 
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) ) # verify area lowercase__: str = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase ) ) # verify boxes lowercase__: List[str] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase ) lowercase__: List[Any] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1e-3 ) ) # verify image_id lowercase__: int = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase ) ) # verify is_crowd lowercase__: int = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase ) ) # verify class_labels lowercase__: Dict = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase ) ) # verify masks lowercase__: Union[str, Any] = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _UpperCAmelCase ) # verify orig_size lowercase__: List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase ) ) # verify size lowercase__: Union[str, Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase ) )
177
0
"""simple docstring""" import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def snake_case_ ( A_ : str, A_ : str, A_ : Optional[int] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = UniSpeechSatForSequenceClassification.from_pretrained(A_, config=A_ ) _lowerCamelCase : Any = downstream_dict['''projector.weight'''] _lowerCamelCase : Tuple = downstream_dict['''projector.bias'''] _lowerCamelCase : str = downstream_dict['''model.post_net.linear.weight'''] _lowerCamelCase : Tuple = downstream_dict['''model.post_net.linear.bias'''] return model def snake_case_ ( A_ : List[Any], A_ : List[Any], A_ : Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(A_, config=A_ ) _lowerCamelCase : int = downstream_dict['''model.linear.weight'''] _lowerCamelCase : int = downstream_dict['''model.linear.bias'''] return model def snake_case_ ( A_ : Optional[Any], A_ : int, A_ : str ): '''simple docstring''' _lowerCamelCase : Any = UniSpeechSatForXVector.from_pretrained(A_, config=A_ ) _lowerCamelCase : str = downstream_dict['''connector.weight'''] _lowerCamelCase : Union[str, Any] = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _lowerCamelCase : Optional[int] = downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] _lowerCamelCase : Union[str, Any] = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] _lowerCamelCase : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] _lowerCamelCase : List[str] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] _lowerCamelCase : Tuple = 
downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] _lowerCamelCase : Optional[int] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] _lowerCamelCase : Tuple = downstream_dict['''objective.W'''] return model @torch.no_grad() def snake_case_ ( A_ : Any, A_ : Optional[Any], A_ : Dict, A_ : str ): '''simple docstring''' _lowerCamelCase : Dict = torch.load(A_, map_location='''cpu''' ) _lowerCamelCase : int = checkpoint['''Downstream'''] _lowerCamelCase : List[str] = UniSpeechSatConfig.from_pretrained(A_ ) _lowerCamelCase : str = WavaVecaFeatureExtractor.from_pretrained( A_, return_attention_mask=A_, do_normalize=A_ ) _lowerCamelCase : List[str] = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): _lowerCamelCase : Any = convert_classification(A_, A_, A_ ) elif arch.endswith('''ForAudioFrameClassification''' ): _lowerCamelCase : Union[str, Any] = convert_diarization(A_, A_, A_ ) elif arch.endswith('''ForXVector''' ): _lowerCamelCase : Optional[int] = convert_xvector(A_, A_, A_ ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: _lowerCamelCase : Dict = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') lowerCAmelCase__ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, 
args.checkpoint_path, args.model_dump_path)
175
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __snake_case ( _lowercase): def __init__( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any]=1_3 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=9_9 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Optional[Any]=3_2 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=5_1_2 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : str=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Optional[int]="last" , __lowerCAmelCase : str=None , __lowerCAmelCase : int=None , ): """simple docstring""" _lowerCamelCase : Dict = parent _lowerCamelCase : List[str] = batch_size _lowerCamelCase : Dict = seq_length _lowerCamelCase : List[Any] = is_training _lowerCamelCase : Dict = use_input_lengths 
_lowerCamelCase : Tuple = use_token_type_ids _lowerCamelCase : Any = use_labels _lowerCamelCase : Optional[Any] = gelu_activation _lowerCamelCase : Optional[Any] = sinusoidal_embeddings _lowerCamelCase : Dict = causal _lowerCamelCase : Dict = asm _lowerCamelCase : str = n_langs _lowerCamelCase : str = vocab_size _lowerCamelCase : Optional[int] = n_special _lowerCamelCase : Dict = hidden_size _lowerCamelCase : int = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : Dict = hidden_dropout_prob _lowerCamelCase : int = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : Any = type_vocab_size _lowerCamelCase : Optional[int] = type_sequence_label_size _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : List[Any] = num_labels _lowerCamelCase : Dict = num_choices _lowerCamelCase : str = summary_type _lowerCamelCase : List[str] = use_proj _lowerCamelCase : int = scope def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Optional[int] = None if self.use_input_lengths: _lowerCamelCase : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _lowerCamelCase : Union[str, Any] = None if self.use_token_type_ids: _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : List[str] = None _lowerCamelCase : Optional[Any] = None if self.use_labels: _lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCamelCase : str = ids_tensor([self.batch_size] , 2 ).float() _lowerCamelCase : Optional[int] = 
ids_tensor([self.batch_size] , self.num_choices ) _lowerCamelCase : Tuple = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , ): """simple docstring""" _lowerCamelCase : Optional[Any] = FlaubertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase ) _lowerCamelCase : str = model(__lowerCAmelCase , langs=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , ): """simple 
docstring""" _lowerCamelCase : Tuple = FlaubertWithLMHeadModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : str = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , ): """simple docstring""" _lowerCamelCase : Union[str, Any] = FlaubertForQuestionAnsweringSimple(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[str] = model(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , ): """simple docstring""" _lowerCamelCase : str = FlaubertForQuestionAnswering(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model( __lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , ) _lowerCamelCase : List[str] = model( __lowerCAmelCase , 
start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , ) ((_lowerCamelCase) , ) : str = result_with_labels.to_tuple() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase ) ((_lowerCamelCase) , ) : Union[str, Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str , ): """simple docstring""" _lowerCamelCase : Dict = FlaubertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : str = model(__lowerCAmelCase ) _lowerCamelCase : Tuple = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , 
__lowerCAmelCase : Union[str, Any] , ): """simple docstring""" _lowerCamelCase : Any = self.num_labels _lowerCamelCase : List[str] = FlaubertForTokenClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , ): """simple docstring""" _lowerCamelCase : List[str] = self.num_choices _lowerCamelCase : Any = FlaubertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCamelCase : int = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Any = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : Optional[Any] = config_and_inputs _lowerCamelCase : int = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, 
'''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : List[str] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) snake_case__ : List[Any] = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=False ): """simple docstring""" _lowerCamelCase : Dict = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": _lowerCamelCase : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) _lowerCamelCase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : int = FlaubertModelTester(self ) _lowerCamelCase : str = ConfigTester(self , config_class=__lowerCAmelCase , emb_dim=3_7 ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] 
): """simple docstring""" _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = FlaubertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return _lowerCamelCase : Any = True _lowerCamelCase : int = model_class(config=__lowerCAmelCase ) _lowerCamelCase : List[str] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : int = torch.jit.trace( __lowerCAmelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) ) _lowerCamelCase : Union[str, Any] = torch.jit.load(os.path.join(__lowerCAmelCase , '''traced_model.pt''' ) , map_location=__lowerCAmelCase ) loaded(inputs_dict['''input_ids'''].to(__lowerCAmelCase ) , inputs_dict['''attention_mask'''].to(__lowerCAmelCase ) ) @require_torch class __snake_case ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' ) _lowerCamelCase : Any = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) with torch.no_grad(): _lowerCamelCase : Any = model(__lowerCAmelCase )[0] _lowerCamelCase : Optional[Any] = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , __lowerCAmelCase ) _lowerCamelCase : Optional[int] = torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 ) )
175
1
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """A single element of the singly linked list."""

    data: int
    next_node: Node | None


class SortedLinkedList:
    """Singly linked list that keeps its elements in ascending order."""

    def __init__(self, ints: Iterable[int]) -> None:
        """Build the list from ``ints``.

        Values are pushed onto the head in descending order, which leaves
        the resulting list sorted ascending.  (The original passed the
        iterable itself as ``reverse=``, which only worked by accident of
        truthiness; ``reverse=True`` is the intent.)
        """
        self.head: Node | None = None
        for value in sorted(ints, reverse=True):
            self.head = Node(value, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored values in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """Return the number of elements (O(n) walk)."""
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """Render the list as ``a -> b -> c``."""
        return " -> ".join(str(item) for item in self)


def merge_lists(
    sll_one: SortedLinkedList, sll_two: SortedLinkedList
) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Short alias used in the demo (the original referenced an undefined ``SSL``).
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
9
def fibonacci(n: int) -> int:
    """Return the ``n``-th Fibonacci number (fibonacci(2) == 1, fibonacci(3) == 2).

    Mirrors the original guard: returns 0 when ``n`` is 1 or not an ``int``.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    if n == 2:
        return 1
    sequence = [0, 1]
    for i in range(2, n + 1):
        sequence.append(sequence[i - 1] + sequence[i - 2])
    return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits.

    Note: like the original, the search starts checking at index 3, so
    ``fibonacci_digits_index(1) == 3``.  The sequence is advanced
    incrementally instead of recomputing ``fibonacci(index)`` from scratch
    each iteration (which would be quadratic overall); the values returned
    are identical.
    """
    index = 2
    a, b = 1, 1  # F(index - 1), F(index) in the indexing used by fibonacci()
    digits = 0
    while digits < n:
        index += 1
        a, b = b, a + b
        digits = len(str(b))
    return index


def solution(n: int = 1_000) -> int:
    """Project Euler 25: index of the first Fibonacci term with ``n`` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
118
0
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class UpperCAmelCase_ : def __init__( self : Optional[Any] , A : Dict , A : int=9_9 , A : List[Any]=1_3 , A : int=7 , A : List[str]=9 , A : List[Any]=True , A : Any=True , A : List[str]=False , A : Dict=3_2 , A : List[Any]=5 , A : Union[str, Any]=4 , A : str=3_7 , A : List[Any]=8 , A : Optional[Any]=0.1 , A : List[str]=0.002 , A : Any=1 , A : List[Any]=0 , A : int=0 , A : str=None , A : Tuple=None , ): _UpperCAmelCase : Dict = parent _UpperCAmelCase : str = batch_size _UpperCAmelCase : List[str] = encoder_seq_length _UpperCAmelCase : int = decoder_seq_length # For common tests _UpperCAmelCase : List[str] = self.decoder_seq_length _UpperCAmelCase : str = is_training _UpperCAmelCase : Dict = use_attention_mask _UpperCAmelCase : Dict = use_labels _UpperCAmelCase : int = vocab_size _UpperCAmelCase : Dict = hidden_size _UpperCAmelCase : Optional[int] = num_hidden_layers _UpperCAmelCase : List[Any] = num_attention_heads _UpperCAmelCase : Dict = d_ff _UpperCAmelCase : Optional[int] = relative_attention_num_buckets _UpperCAmelCase : List[Any] = dropout_rate _UpperCAmelCase : Any = initializer_factor _UpperCAmelCase : int = eos_token_id _UpperCAmelCase : Optional[Any] = pad_token_id _UpperCAmelCase : List[Any] = decoder_start_token_id _UpperCAmelCase : List[Any] = None _UpperCAmelCase : Dict = decoder_layers def snake_case_ ( self : Optional[int] ): return TaConfig.from_pretrained("google/umt5-base" ) def snake_case_ ( self : 
Union[str, Any] , A : List[Any] , A : List[Any] , A : Optional[Any] , A : List[str]=None , A : Any=None , A : Optional[int]=None , A : Any=None , A : List[Any]=None , ): if attention_mask is None: _UpperCAmelCase : int = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _UpperCAmelCase : Optional[Any] = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _UpperCAmelCase : Dict = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=A ) if decoder_head_mask is None: _UpperCAmelCase : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=A ) if cross_attn_head_mask is None: _UpperCAmelCase : Dict = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=A ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def snake_case_ ( self : Any ): _UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) _UpperCAmelCase : str = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _UpperCAmelCase : Dict = input_ids.clamp(self.pad_token_id + 1 ) _UpperCAmelCase : Dict = decoder_input_ids.clamp(self.pad_token_id + 1 ) _UpperCAmelCase : Optional[int] = self.get_config() _UpperCAmelCase : Tuple = 
config.num_attention_heads _UpperCAmelCase : Union[str, Any] = self.prepare_inputs_dict(A , A , A ) return config, input_dict def snake_case_ ( self : Dict ): _UpperCAmelCase , _UpperCAmelCase : int = self.prepare_config_and_inputs() return config, inputs_dict def snake_case_ ( self : Optional[int] ): return TaConfig( vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def snake_case_ ( self : Optional[Any] ): return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def snake_case_ ( self : Any , A : Union[str, Any] , A : Dict , A : Optional[int] , A : List[Any] , A : Union[str, Any] , A : int , ): _UpperCAmelCase : Any = UMTaModel(config=A ) model.to(A ) model.eval() _UpperCAmelCase : Union[str, Any] = model( input_ids=A , decoder_input_ids=A , attention_mask=A , decoder_attention_mask=A , ) _UpperCAmelCase : List[Any] = model(input_ids=A , decoder_input_ids=A ) _UpperCAmelCase : int = result.last_hidden_state _UpperCAmelCase : Dict = result.past_key_values _UpperCAmelCase : Tuple = 
result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(A ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def snake_case_ ( self : int , A : Optional[Any] , A : Tuple , A : List[Any] , A : int , A : int , A : Any , ): _UpperCAmelCase : List[Any] = UMTaModel(config=A ).get_decoder().to(A ).eval() # first forward pass _UpperCAmelCase : str = model(A , use_cache=A ) _UpperCAmelCase : Any = model(A ) _UpperCAmelCase : Any = model(A , use_cache=A ) self.parent.assertTrue(len(A ) == len(A ) ) self.parent.assertTrue(len(A ) == len(A ) + 1 ) _UpperCAmelCase , _UpperCAmelCase : str = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _UpperCAmelCase : Any = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and _UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase : Dict = model(A )["last_hidden_state"] _UpperCAmelCase : Optional[Any] = model(A , past_key_values=A )["last_hidden_state"] # select random slice _UpperCAmelCase : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase : Tuple = output_from_no_past[:, -1, random_slice_idx].detach() _UpperCAmelCase : str = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def snake_case_ ( self : List[str] , A : List[Any] , A : List[str] , ): _UpperCAmelCase : List[Any] = UMTaModel(config=A ).to(A ).half().eval() _UpperCAmelCase : Optional[Any] = model(**A 
)["last_hidden_state"] self.parent.assertFalse(torch.isnan(A ).any().item() ) @require_torch class UpperCAmelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : str = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : str = (UMTaForConditionalGeneration,) if is_torch_available() else () __SCREAMING_SNAKE_CASE : int = ( { 'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : List[str] = True __SCREAMING_SNAKE_CASE : List[str] = False __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : Optional[int] = True __SCREAMING_SNAKE_CASE : str = True # The small UMT5 model needs higher percentages for CPU/MP tests __SCREAMING_SNAKE_CASE : str = [0.8, 0.9] def snake_case_ ( self : Tuple ): _UpperCAmelCase : Tuple = UMTaModelTester(self ) @unittest.skip("Test has a segmentation fault on torch 1.8.0" ) def snake_case_ ( self : int ): _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase : Optional[Any] = UMTaModel(config_and_inputs[0] ).to(A ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( A , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'{tmpdirname}/t5_test.onnx' , export_params=A , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def snake_case_ ( self : List[str] ): _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*A ) def snake_case_ ( self : List[Any] ): _UpperCAmelCase : Optional[int] = 
["encoder_attentions", "decoder_attentions", "cross_attentions"] _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase : Dict = config_and_inputs[0] _UpperCAmelCase : List[Any] = UMTaForConditionalGeneration(A ).eval() model.to(A ) _UpperCAmelCase : int = { "head_mask": torch.zeros(config.num_layers , config.num_heads , device=A ), "decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=A ), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=A ), } for attn_name, (name, mask) in zip(A , head_masking.items() ): _UpperCAmelCase : Any = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _UpperCAmelCase : int = torch.ones( config.num_decoder_layers , config.num_heads , device=A ) _UpperCAmelCase : Any = model.generate( config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=A , return_dict_in_generate=A , **A , ) # We check the state of decoder_attentions and cross_attentions just from the last step _UpperCAmelCase : int = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases." ) def snake_case_ ( self : Dict ): pass @require_torch @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( unittest.TestCase ): @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def snake_case_ ( self : int ): _UpperCAmelCase : Tuple = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=A ).to(A ) _UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=A , legacy=A ) _UpperCAmelCase : Optional[Any] = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] _UpperCAmelCase : Optional[Any] = tokenizer(A , return_tensors="pt" , padding=A ).input_ids # fmt: off _UpperCAmelCase : Union[str, Any] = torch.tensor( [ [ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1], ] ) # fmt: on torch.testing.assert_allclose(A , A ) _UpperCAmelCase : List[Any] = model.generate(input_ids.to(A ) ) _UpperCAmelCase : List[Any] = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. 
[eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] _UpperCAmelCase : str = tokenizer.batch_decode(A ) self.assertEqual(A , A )
202
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer _lowerCAmelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : int = { "vocab_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt" ), "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt", "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt" ), }, "tokenizer_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json" ), "google/electra-base-generator": ( "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json" ), "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json" ), }, } _lowerCAmelCase : List[Any] = { "google/electra-small-generator": 5_12, "google/electra-base-generator": 5_12, 
"google/electra-large-generator": 5_12, "google/electra-small-discriminator": 5_12, "google/electra-base-discriminator": 5_12, "google/electra-large-discriminator": 5_12, } _lowerCAmelCase : Optional[Any] = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_INIT_CONFIGURATION __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Tuple = ElectraTokenizer def __init__( self : Dict , A : Dict=None , A : Optional[int]=None , A : Dict=True , A : Optional[Any]="[UNK]" , A : Any="[SEP]" , A : str="[PAD]" , A : Tuple="[CLS]" , A : Optional[Any]="[MASK]" , A : Any=True , A : Tuple=None , **A : Any , ): super().__init__( A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , ) _UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , A ) != do_lower_case or normalizer_state.get("strip_accents" , A ) != strip_accents or normalizer_state.get("handle_chinese_chars" , A ) != tokenize_chinese_chars ): _UpperCAmelCase : Union[str, Any] = getattr(A , normalizer_state.pop("type" ) ) _UpperCAmelCase : Dict = do_lower_case _UpperCAmelCase : Optional[int] = strip_accents _UpperCAmelCase : Any = tokenize_chinese_chars _UpperCAmelCase : Optional[Any] = normalizer_class(**A ) _UpperCAmelCase : int = do_lower_case def snake_case_ ( self : Tuple , A : 
str , A : int=None ): _UpperCAmelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case_ ( self : Any , A : List[int] , A : Optional[List[int]] = None ): _UpperCAmelCase : Any = [self.sep_token_id] _UpperCAmelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case_ ( self : Any , A : str , A : Optional[str] = None ): _UpperCAmelCase : List[Any] = self._tokenizer.model.save(A , name=A ) return tuple(A )
202
1
"""Fast tokenization class for BlenderbotSmall (backed by HuggingFace's tokenizers library)."""
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Byte-level BPE fast tokenizer for BlenderbotSmall.

    Fixes over the previous revision: ``__init__`` declared duplicate
    parameter names (a SyntaxError), both public methods shared one shadowed
    name so only the last survived, and the pair-handling branch referenced
    undefined identifiers.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<bos> A <eos>`` for one sequence; a pair is joined with extra ``<eos>`` separators."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return an all-zero mask — BlenderbotSmall does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
97
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Solve a rat-in-a-maze puzzle and print the path if one exists.

    ``maze`` is a square grid where 0 marks an open cell and a truthy value a
    blocked one.  The path runs from (0, 0) to (size-1, size-1).

    Returns:
        True if a path exists (the solution grid is printed), else False.
    """
    size = len(maze)
    # Solution grid: cells on the found path are marked with 1.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first backtracking step: try to extend the path through (i, j).

    Marks visited cells in ``solutions`` and un-marks them on backtrack.
    The mangled original assigned every update to a throwaway local instead
    of ``solutions[i][j]``, so the solution matrix was never written.
    """
    size = len(maze)
    # Reached the bottom-right corner: path complete.
    if i == j == size - 1:
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # check lower bounds
    upper_flag = (i < size) and (j < size)  # check upper bounds

    if lower_flag and upper_flag:
        # Cell must be neither already on the path nor blocked.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            solutions[i][j] = 1  # tentatively add cell to the path
            # Explore down, right, up, left in that order.
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0  # dead end: backtrack
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
336
0
"""simple docstring""" def lowercase__ ( _UpperCAmelCase ) -> int: '''simple docstring''' if not numbers: return 0 if not isinstance(_UpperCAmelCase , (list, tuple) ) or not all( isinstance(_UpperCAmelCase , _UpperCAmelCase ) for number in numbers ): raise ValueError('numbers must be an iterable of integers' ) lowercase : List[Any] = numbers[0] for i in range(1 , len(_UpperCAmelCase ) ): # update the maximum and minimum subarray products lowercase : List[Any] = numbers[i] if number < 0: lowercase : str = min_till_now, max_till_now lowercase : int = max(_UpperCAmelCase , max_till_now * number ) lowercase : Tuple = min(_UpperCAmelCase , min_till_now * number ) # update the maximum product found till now lowercase : List[Any] = max(_UpperCAmelCase , _UpperCAmelCase ) return max_prod
360
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers _UpperCamelCase: Any = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
53
0
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets SCREAMING_SNAKE_CASE__ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" SCREAMING_SNAKE_CASE__ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. 
See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n" SCREAMING_SNAKE_CASE__ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... 
\"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... 
case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): def _snake_case ( self ) -> Tuple: if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ): raise ImportWarning( """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n""" """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[ """https://github.com/jhclark/tercom""", ] , ) def _snake_case ( self , lowercase , lowercase , lowercase = False , lowercase = False , lowercase = False , lowercase = False , ) -> Optional[int]: lowerCAmelCase = len(references[0] ) if any(len(lowercase ) != references_per_prediction for refs in references ): raise ValueError("""Sacrebleu requires the same number of references for each prediction""" ) lowerCAmelCase = [[refs[i] for refs in references] for i in range(lowercase )] lowerCAmelCase = TER( normalized=lowercase , no_punct=lowercase , asian_support=lowercase , case_sensitive=lowercase , ) lowerCAmelCase = sb_ter.corpus_score(lowercase , lowercase ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
46
"""ConvNeXT V2 model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ConvNeXT V2 model / backbone.

    Fixes the mangled original, whose class bases were the undefined name
    ``__UpperCAmelCase`` instead of the imported ``BackboneConfigMixin`` and
    ``PretrainedConfig``, and whose two module constants shared one name.
    """

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Defaults mirror the "tiny" ConvNeXT V2 variant.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # Stage names drive the backbone feature-selection machinery.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
164
0
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, 
wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
359
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple: """simple docstring""" def run_func(snake_case_ : Union[str, Any] ): @wraps(snake_case_ ) def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) @wraps(snake_case_ ) @tf.function(experimental_compile=snake_case_ ) def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ): return func(*snake_case_ , **snake_case_ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. 
Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]: """simple docstring""" _lowerCAmelCase = random.Random() _lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class __lowerCamelCase ( __lowercase ): __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = "TensorFlow" @property def A__ (self ): '''simple docstring''' return tf.__version__ def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_speed(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_inference ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' if 
self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase ) _lowerCAmelCase = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) _lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase ) return self._measure_memory(_train ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(lowerCamelCase , training=lowerCamelCase ) _lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) _lowerCAmelCase = ( hasattr(lowerCamelCase , """architectures""" ) and isinstance(config.architectures , lowerCamelCase ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model _lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] ) _lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ) _lowerCAmelCase = model_cls(lowerCamelCase ) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: _lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase ) # encoder-decoder has vocab size saved differently _lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size _lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0] _lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables ) return gradients _lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def A__ (self , lowerCamelCase ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(lowerCamelCase , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _lowerCAmelCase = timeit.repeat( lowerCamelCase , repeat=self.args.repeat , number=10 , ) return min(lowerCamelCase ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. 
{e}""" ) def A__ (self , lowerCamelCase ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) _lowerCAmelCase = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) _lowerCAmelCase = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() _lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase ) _lowerCAmelCase = meminfo.used _lowerCAmelCase = Memory(lowerCamelCase ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) _lowerCAmelCase = None else: _lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase ) _lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes if self.args.trace_memory_line_by_line: _lowerCAmelCase = stop_memory_tracing(lowerCamelCase ) if memory is None: _lowerCAmelCase = summary.total else: _lowerCAmelCase = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""" ) return "N/A", None
317
0
"""Binomial distribution: probability of an exact number of successes in a
fixed number of independent Bernoulli trials."""
from math import comb


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return P(X = successes) for X ~ Binomial(trials, prob).

    The mangled original defined the function under a throwaway name while
    the ``__main__`` block called ``binomial_distribution`` (NameError); the
    integer type check also ran after arithmetic comparisons.  The binomial
    coefficient now uses ``math.comb`` instead of three factorials.

    Args:
        successes: number of successful trials (non-negative, <= trials).
        trials: total number of trials (non-negative).
        prob: success probability of a single trial, strictly between 0 and 1.

    Raises:
        ValueError: if any argument is out of range or not of the right type.
    """
    # Validate types first so invalid inputs fail with a consistent error.
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    # C(trials, successes) * p^k * (1 - p)^(n - k)
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    return comb(trials, successes) * probability


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
324
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=7 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Optional[Any]=30 , lowerCAmelCase__ : Dict=400 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Union[str, Any]=1 / 255 , lowerCAmelCase__ : Tuple=True , ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_normalize _UpperCamelCase = image_mean _UpperCamelCase = image_std _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_pad def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case__ ( self : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=False ) -> str: '''simple 
docstring''' if not batched: _UpperCamelCase = image_inputs[0] if isinstance(lowerCAmelCase__ , Image.Image ): _UpperCamelCase , _UpperCamelCase = image.size else: _UpperCamelCase , _UpperCamelCase = image.shape[1], image.shape[2] if w < h: _UpperCamelCase = int(self.size['''shortest_edge'''] * h / w ) _UpperCamelCase = self.size['''shortest_edge'''] elif w > h: _UpperCamelCase = self.size['''shortest_edge'''] _UpperCamelCase = int(self.size['''shortest_edge'''] * w / h ) else: _UpperCamelCase = self.size['''shortest_edge'''] _UpperCamelCase = self.size['''shortest_edge'''] else: _UpperCamelCase = [] for image in image_inputs: _UpperCamelCase , _UpperCamelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCamelCase = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0] _UpperCamelCase = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Union[str, Any] = DeformableDetrImageProcessor if is_vision_available() else None def snake_case__ ( self : int ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = DeformableDetrImageProcessingTester(self ) @property def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_rescale''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , 
'''do_pad''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) ) def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) _UpperCamelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) def snake_case__ ( self : Tuple ) -> Any: '''simple docstring''' pass def snake_case__ ( self : int ) -> Any: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self : str ) -> List[str]: '''simple docstring''' _UpperCamelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ 
, batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case__ ( self : int ) -> Tuple: '''simple docstring''' _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _UpperCamelCase = json.loads(f.read() ) _UpperCamelCase = {'''image_id''': 39769, '''annotations''': target} # encode them _UpperCamelCase = DeformableDetrImageProcessor() _UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors='''pt''' ) # verify pixel values _UpperCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) ) # verify area _UpperCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) ) # verify boxes _UpperCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) ) # verify image_id _UpperCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) ) # verify is_crowd _UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) ) # verify class_labels _UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) 
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) ) # verify orig_size _UpperCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) ) # verify size _UpperCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) ) @slow def snake_case__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _UpperCamelCase = json.loads(f.read() ) _UpperCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} _UpperCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _UpperCamelCase = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors='''pt''' ) # verify pixel values _UpperCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) ) # verify area _UpperCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) ) # verify boxes _UpperCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) ) # 
verify image_id _UpperCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) ) # verify is_crowd _UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) ) # verify class_labels _UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) ) # verify masks _UpperCamelCase = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowerCAmelCase__ ) # verify orig_size _UpperCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) ) # verify size _UpperCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) )
324
1
"""Generate all k-element combinations of the numbers 1..n (backtracking)."""
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every combination of ``k`` numbers chosen from 1..n, in order.

    >>> generate_all_combinations(n=4, k=2)
    [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Recursive helper: extend ``current_list`` with values >= ``increment``.

    ``level`` counts how many elements are still needed; when it reaches 0 a
    snapshot of the current partial combination is appended to ``total_list``.
    """
    if level == 0:
        # Copy, because current_list keeps being mutated by the backtracking.
        total_list.append(current_list[:])
        return
    # Upper bound leaves enough room for the remaining `level - 1` elements.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """Print each combination on its own line."""
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
240
"""Project Euler 12: first triangle number with more than 500 divisors."""


def count_divisors(n: int) -> int:
    """Return the number of divisors of ``n`` via prime factorisation.

    If n = p1^a1 * p2^a2 * ..., the divisor count is (a1+1)(a2+1)...
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    # Whatever remains (> 1) is a single prime factor with multiplicity 1.
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number having more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i  # t_num is the i-th triangle number 1+2+...+i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
240
1
"""Constant values for checkpointing, SageMaker, FSDP and torch-launcher config.

NOTE(review): every assignment below targets the same machine-mangled name
``lowerCamelCase__``, so each statement clobbers the previous one and only the
final list is reachable at runtime.  The values mirror accelerate-style
constants (checkpoint file names, FSDP options, launcher args); the distinct
original names need restoring — TODO confirm against the upstream module.
"""
import operator as op

# Checkpoint-related file names.
lowerCamelCase__ = 'scaler.pt'
lowerCamelCase__ = 'pytorch_model'
lowerCamelCase__ = 'random_states'
lowerCamelCase__ = 'optimizer'
lowerCamelCase__ = 'scheduler'
lowerCamelCase__ = 'pytorch_model.bin'
lowerCamelCase__ = 'pytorch_model.bin.index.json'
lowerCamelCase__ = 'model.safetensors'
lowerCamelCase__ = 'model.safetensors.index.json'

# SageMaker environment version strings (pytorch / python / transformers).
lowerCamelCase__ = '1.10.2'
lowerCamelCase__ = 'py38'
lowerCamelCase__ = '4.17.0'

# SageMaker EC2 instance types — presumably those supporting parallel
# training; verify against the original module.
lowerCamelCase__ = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']

# FSDP (fully sharded data parallel) option sets.
lowerCamelCase__ = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
lowerCamelCase__ = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
lowerCamelCase__ = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
lowerCamelCase__ = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']

# Version string — presumably the minimum PyTorch version for FSDP features.
lowerCamelCase__ = '2.0.1'

# Multi-node launcher names (DeepSpeed-style).
lowerCamelCase__ = ['pdsh', 'standard', 'openmpi', 'mvapich']

# torch.compile / dynamo mode names.
lowerCamelCase__ = ['default', 'reduce-overhead', 'max-autotune']

# Map from comparison-operator strings to the corresponding functions.
lowerCamelCase__ = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCamelCase__ = [
    'nnodes',
    'nproc_per_node',
    'rdzv_backend',
    'rdzv_endpoint',
    'rdzv_id',
    'rdzv_conf',
    'standalone',
    'max_restarts',
    'monitor_interval',
    'start_method',
    'role',
    'module',
    'm',
    'no_python',
    'run_path',
    'log_dir',
    'r',
    'redirects',
    't',
    'tee',
    'node_rank',
    'master_addr',
    'master_port',
]

# Distributed-type name lists (CUDA-capable vs XPU-capable backends —
# exact original names unknown; confirm).
lowerCamelCase__ = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
lowerCamelCase__ = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
234
"""Count negative numbers in a matrix sorted decreasingly along rows and columns."""


def generate_large_matrix():
    """Return a 1000x2000 grid whose rows and columns decrease monotonically."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid):
    """Assert that every row and every column of ``grid`` decreases monotonically."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array):
    """Binary-search the index of the first negative value in a decreasing array."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid):
    """Count negatives with one binary search per row, shrinking the bound."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        # Columns decrease too, so the first-negative index can only move left.
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid):
    """Count negatives by scanning every cell."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid):
    """Count negatives row by row, stopping at each row's first negative."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                # Everything after the first negative in a decreasing row is negative.
                total += len(row) - i
                break
    return total


def benchmark():
    """Benchmark the three counting implementations on the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
234
1
"""simple docstring""" import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def _A ( lowercase ): """simple docstring""" return 1 / (1 + np.exp(-z )) def _A ( lowercase , lowercase ): """simple docstring""" return (-y * np.log(_UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean() def _A ( lowercase , lowercase , lowercase ): """simple docstring""" a =np.dot(_UpperCamelCase , _UpperCamelCase ) return np.sum(y * scores - np.log(1 + np.exp(_UpperCamelCase ) ) ) def _A ( lowercase , lowercase , lowercase , lowercase=7_00_00 ): """simple docstring""" a =np.zeros(x.shape[1] ) for iterations in range(_UpperCamelCase ): a =np.dot(_UpperCamelCase , _UpperCamelCase ) a =sigmoid_function(_UpperCamelCase ) a =np.dot(x.T , h - y ) / y.size a =theta - alpha * gradient # updating the weights a =np.dot(_UpperCamelCase , _UpperCamelCase ) a =sigmoid_function(_UpperCamelCase ) a =cost_function(_UpperCamelCase , _UpperCamelCase ) if iterations % 1_00 == 0: print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": lowerCamelCase_ : Optional[Any] = datasets.load_iris() lowerCamelCase_ : List[Any] = iris.data[:, :2] lowerCamelCase_ : List[str] = (iris.target != 0) * 1 lowerCamelCase_ : Tuple = 0.1 lowerCamelCase_ : Any = logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0) print("""theta: """, theta) # printing the theta i.e our weights vector def _A ( lowercase ): """simple docstring""" return sigmoid_function( np.dot(_UpperCamelCase , _UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(1_0, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""") plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""") (lowerCamelCase_) : Union[str, Any] = (x[:, 0].min(), x[:, 0].max()) (lowerCamelCase_) : Dict = (x[:, 1].min(), x[:, 1].max()) (lowerCamelCase_) : List[Any] = 
np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) lowerCamelCase_ : int = np.c_[xxa.ravel(), xxa.ravel()] lowerCamelCase_ : Any = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""") plt.legend() plt.show()
368
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase_ : List[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = ["pixel_values"] def __init__( self , __A = True , __A = None , __A = PILImageResampling.BICUBIC , __A = True , __A = None , __A = True , __A = 1 / 255 , __A = True , __A = None , __A = None , __A = True , **__A , ) -> None: super().__init__(**__A ) a =size if size is not None else {'''shortest_edge''': 224} a =get_size_dict(__A , default_to_square=__A ) a =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} a =get_size_dict(__A , default_to_square=__A , param_name='''crop_size''' ) a =do_resize a =size a =resample a =do_center_crop a =crop_size a =do_rescale a =rescale_factor a =do_normalize a =image_mean if image_mean is not None else OPENAI_CLIP_MEAN a =image_std if image_std is not None else OPENAI_CLIP_STD a =do_convert_rgb def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray: a =get_size_dict(__A , default_to_square=__A ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''' ) a =get_resize_output_image_size(__A , size=size['''shortest_edge'''] , default_to_square=__A ) return resize(__A , size=__A , resample=__A , data_format=__A , **__A ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None , **__A , ) -> np.ndarray: a =get_size_dict(__A ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(__A , size=(size['''height'''], size['''width''']) , data_format=__A , **__A ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None , **__A , ) -> Any: return rescale(__A , scale=__A , data_format=__A , **__A ) def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray: return normalize(__A , mean=__A , std=__A , data_format=__A , **__A ) def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> PIL.Image.Image: a =do_resize if do_resize is not None else self.do_resize a =size if size is not None else self.size a =get_size_dict(__A , param_name='''size''' , default_to_square=__A ) a =resample if resample is not None else self.resample a =do_center_crop if do_center_crop is not None else self.do_center_crop a =crop_size if crop_size is not None else self.crop_size a =get_size_dict(__A , param_name='''crop_size''' , default_to_square=__A ) a =do_rescale if do_rescale is not None else self.do_rescale a =rescale_factor if rescale_factor is not None else self.rescale_factor a =do_normalize if do_normalize is not None else self.do_normalize a =image_mean if image_mean is not None else self.image_mean a =image_std if image_std is not None else self.image_std a =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb a =make_list_of_images(__A ) if not valid_images(__A ): raise 
ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: a =[convert_to_rgb(__A ) for image in images] # All transformations expect numpy arrays. a =[to_numpy_array(__A ) for image in images] if do_resize: a =[self.resize(image=__A , size=__A , resample=__A ) for image in images] if do_center_crop: a =[self.center_crop(image=__A , size=__A ) for image in images] if do_rescale: a =[self.rescale(image=__A , scale=__A ) for image in images] if do_normalize: a =[self.normalize(image=__A , mean=__A , std=__A ) for image in images] a =[to_channel_dimension_format(__A , __A ) for image in images] a ={'''pixel_values''': images} return BatchFeature(data=__A , tensor_type=__A )
215
0
"""Project Euler 74: count digit-factorial chains of a given length."""
from math import factorial

# Precomputed factorial of each decimal digit, keyed by its character.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``.

    Raises TypeError for non-int input and ValueError for negative input.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Count starting numbers below ``number_limit`` whose digit-factorial
    chain contains exactly ``chain_length`` non-repeating terms."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating
        # item or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
5
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure of an ideal gas: P = nRT / V.

    Raises ValueError when any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume of an ideal gas: V = nRT / P.

    Raises ValueError when any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
5
1
"""Tests for the Flax DistilBERT models."""
import unittest

import numpy as np

from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )


class FlaxDistilBertModelTester(unittest.TestCase):
    """Builds small random configs and inputs for the Flax DistilBERT tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return a small random (config, input_ids, attention_mask) triple."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape
        expected by FlaxModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    # NOTE(review): FlaxDistilBertForQuestionAnswering appears twice in the
    # original tuple; kept as-is to preserve behaviour.
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke test: each model class loads from the hub and runs a forward pass.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        # Compare the base model's hidden states against reference values.
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
350
"""Project Euler 8: greatest product of 13 adjacent digits in a 1000-digit number."""
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def str_eval(s: str) -> int:
    """Return the product of the digits of the digit string ``s``."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Return the largest product of 13 adjacent digits in ``n``.

    A sliding 13-character window is kept while incoming digits are no smaller
    than the window's first digit; otherwise the window product is evaluated
    and the window jumps forward.
    """
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
147
0
"""Kinetic energy of a moving body: E = 1/2 * m * v^2."""


def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy of a body of ``mass`` moving at ``velocity``.

    Velocity may be negative (direction); energy is always non-negative.
    Raises ValueError for negative mass.
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
28
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of elements of ``nums`` with no two adjacent.

    Classic DP: track the best sum that includes the current element and the
    best sum that excludes it. Returns 0 for an empty list or when every
    achievable sum is negative (taking nothing is allowed).
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # Including `num` forces the previous element to be excluded.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
205
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer _snake_case = logging.get_logger(__name__) _snake_case = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _snake_case = { 'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'}, 'tokenizer_file': { 'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json' }, } _snake_case = {'mobilebert-uncased': 512} _snake_case = {} class a__ ( lowerCamelCase_ ): _SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Any = PRETRAINED_INIT_CONFIGURATION _SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : List[str] = MobileBertTokenizer def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ): """simple docstring""" super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) _lowercase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , _UpperCamelCase ) != do_lower_case or normalizer_state.get("strip_accents" , _UpperCamelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , _UpperCamelCase ) != tokenize_chinese_chars ): 
_lowercase : int = getattr(_UpperCamelCase , normalizer_state.pop("type" ) ) _lowercase : Union[str, Any] = do_lower_case _lowercase : Optional[int] = strip_accents _lowercase : Dict = tokenize_chinese_chars _lowercase : List[str] = normalizer_class(**_UpperCamelCase ) _lowercase : Dict = do_lower_case def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase=None ): """simple docstring""" _lowercase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None ): """simple docstring""" _lowercase : Any = [self.sep_token_id] _lowercase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None ): """simple docstring""" _lowercase : Dict = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
356
'''simple docstring''' import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor _snake_case = logging.get_logger(__name__) class a__ ( lowerCamelCase_ ): def __init__( self , *_UpperCamelCase , **_UpperCamelCase ): """simple docstring""" warnings.warn( "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ChineseCLIPImageProcessor instead." , _UpperCamelCase , ) super().__init__(*_UpperCamelCase , **_UpperCamelCase )
199
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ : Any = logging.get_logger(__name__) def lowerCamelCase__ ( a , a=False , a=False , a=False ) -> Any: _A: Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") ) # embeddings rename_keys.extend( [ # text embeddings ('''text_embeddings.word_embeddings.weight''', 
'''vilt.embeddings.text_embeddings.word_embeddings.weight'''), ( '''text_embeddings.position_embeddings.weight''', '''vilt.embeddings.text_embeddings.position_embeddings.weight''', ), ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''), ( '''text_embeddings.token_type_embeddings.weight''', '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''', ), ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''), ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''), # patch embeddings ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''), ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''), ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''), ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''), # token type embeddings ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''), ] ) # final layernorm + pooler rename_keys.extend( [ ('''transformer.norm.weight''', '''vilt.layernorm.weight'''), ('''transformer.norm.bias''', '''vilt.layernorm.bias'''), ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''), ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('''vqa_classifier.0.weight''', '''classifier.0.weight'''), ('''vqa_classifier.0.bias''', '''classifier.0.bias'''), ('''vqa_classifier.1.weight''', '''classifier.1.weight'''), ('''vqa_classifier.1.bias''', '''classifier.1.bias'''), ('''vqa_classifier.3.weight''', '''classifier.3.weight'''), ('''vqa_classifier.3.bias''', '''classifier.3.bias'''), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''), ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''), 
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''), ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''), ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''), ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''), ] ) else: pass return rename_keys def lowerCamelCase__ ( a , a ) -> int: for i in range(config.num_hidden_layers ): _A: Optional[Any] = '''vilt.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A: str = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""" ) _A: Tuple = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _A: str = in_proj_weight[ : config.hidden_size, : ] _A: Dict = in_proj_bias[: config.hidden_size] _A: Dict = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A: List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A: Optional[int] = in_proj_weight[ -config.hidden_size :, : ] _A: List[Any] = in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( a ) -> Any: _A: Dict = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a , a ) def lowerCamelCase__ ( a , a , a ) -> Union[str, Any]: _A: Optional[Any] = dct.pop(a ) _A: str = val @torch.no_grad() def lowerCamelCase__ ( a , a ) -> Dict: _A: List[str] = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=a ) _A: Dict = False _A: Tuple = False _A: Optional[Any] = False _A: Dict = False if "vqa" in checkpoint_url: _A: List[Any] = True _A: int = 31_29 _A: List[str] = '''huggingface/label-files''' _A: List[str] = '''vqa2-id2label.json''' _A: Optional[Any] = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) _A: Any = {int(a ): v for k, v in idalabel.items()} _A: List[str] = idalabel _A: Tuple = {v: k for k, v in idalabel.items()} _A: str = ViltForQuestionAnswering(a ) elif "nlvr" in checkpoint_url: _A: Any = True _A: List[Any] = 2 _A: Any = 
{0: '''False''', 1: '''True'''} _A: Optional[int] = {v: k for k, v in config.idalabel.items()} _A: List[str] = 3 _A: Optional[Any] = ViltForImagesAndTextClassification(a ) elif "irtr" in checkpoint_url: _A: Dict = True _A: Optional[Any] = ViltForImageAndTextRetrieval(a ) elif "mlm_itm" in checkpoint_url: _A: Union[str, Any] = True _A: List[str] = ViltForMaskedLM(a ) else: raise ValueError('''Unknown model type''' ) # load state_dict of original model, remove and rename some keys _A: Any = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' )['''state_dict'''] _A: str = create_rename_keys(a , a , a , a ) for src, dest in rename_keys: rename_key(a , a , a ) read_in_q_k_v(a , a ) if mlm_model or irtr_model: _A: List[Any] = ['''itm_score.fc.weight''', '''itm_score.fc.bias'''] for k in ignore_keys: state_dict.pop(a , a ) # load state dict into HuggingFace model model.eval() if mlm_model: _A , _A: int = model.load_state_dict(a , strict=a ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(a ) # Define processor _A: Any = ViltImageProcessor(size=3_84 ) _A: Any = BertTokenizer.from_pretrained('''bert-base-uncased''' ) _A: Union[str, Any] = ViltProcessor(a , a ) # Forward pass on example inputs (image + text) if nlvr_model: _A: Union[str, Any] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=a ).raw ) _A: List[Any] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=a ).raw ) _A: Tuple = ( '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are''' ''' standing.''' ) _A: Union[str, Any] = processor(a , a , return_tensors='''pt''' ) _A: Dict = processor(a , a , return_tensors='''pt''' ) _A: Tuple = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: _A: List[str] = 
Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=a ).raw ) if mlm_model: _A: Optional[int] = '''a bunch of [MASK] laying on a [MASK].''' else: _A: int = '''How many cats are there?''' _A: Union[str, Any] = processor(a , a , return_tensors='''pt''' ) _A: Optional[int] = model(**a ) # Verify outputs if mlm_model: _A: Dict = torch.Size([1, 11, 3_05_22] ) _A: Optional[Any] = torch.tensor([-12.5061, -12.5123, -12.5174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1E-4 ) # verify masked token prediction equals "cats" _A: int = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: _A: Optional[Any] = torch.Size([1, 31_29] ) _A: List[str] = torch.tensor([-15.9495, -18.1472, -10.3041] ) assert torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1E-4 ) # verify vqa prediction equals "2" _A: List[Any] = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: _A: List[Any] = torch.Size([1, 2] ) _A: List[Any] = torch.tensor([-2.8721, 2.1291] ) assert torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(a ).mkdir(exist_ok=a ) print(f"""Saving model and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(a ) processor.save_pretrained(a ) if __name__ == "__main__": UpperCAmelCase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) UpperCAmelCase__ : Optional[Any] = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
121
def lowerCamelCase__ ( a , a ) -> str: if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) _A: Union[str, Any] = str(bin(a ) )[2:] # remove the leading "0b" _A: Union[str, Any] = str(bin(a ) )[2:] # remove the leading "0b" _A: Optional[int] = max(len(a ) , len(a ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(a ) , b_binary.zfill(a ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
121
1
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowercase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self ) -> str: """simple docstring""" UpperCAmelCase__ = tempfile.mkdtemp() # fmt: off UpperCAmelCase__ = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest'] # fmt: on UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) UpperCAmelCase__ = { 'do_resize': True, 'size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5], } UpperCAmelCase__ = os.path.join(self.tmpdirname , __a ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(__a , __a ) def UpperCamelCase__ (self , **__a ) -> List[str]: """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **__a ) def UpperCamelCase__ (self , **__a ) -> int: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a ) def UpperCamelCase__ (self ) -> Any: """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ (self ) -> str: """simple docstring""" UpperCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCAmelCase__ = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def 
UpperCamelCase__ (self ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = self.get_tokenizer() UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def UpperCamelCase__ (self ) -> int: """simple docstring""" UpperCAmelCase__ = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) UpperCAmelCase__ = self.get_image_processor(do_normalize=__a , padding_value=1.0 ) UpperCAmelCase__ = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def UpperCamelCase__ (self ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = self.get_tokenizer() UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) UpperCAmelCase__ = self.prepare_image_inputs() UpperCAmelCase__ = image_processor(__a , return_tensors='np' ) UpperCAmelCase__ = processor(images=__a , return_tensors='np' ) for key in 
input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = self.get_tokenizer() UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) UpperCAmelCase__ = 'lower newer' UpperCAmelCase__ = processor(text=__a ) UpperCAmelCase__ = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase__ (self ) -> int: """simple docstring""" UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = self.get_tokenizer() UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) UpperCAmelCase__ = 'lower newer' UpperCAmelCase__ = self.prepare_image_inputs() UpperCAmelCase__ = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(__a ): processor() def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = self.get_tokenizer() UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) UpperCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase__ = processor.batch_decode(__a ) UpperCAmelCase__ = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = self.get_tokenizer() UpperCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) UpperCAmelCase__ = 'lower newer' UpperCAmelCase__ = self.prepare_image_inputs() UpperCAmelCase__ = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , 
processor.model_input_names )
335
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class lowercase : '''simple docstring''' def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Tuple: """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = 13 UpperCAmelCase__ = 7 UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = 99 UpperCAmelCase__ = 384 UpperCAmelCase__ = 2 UpperCAmelCase__ = 4 UpperCAmelCase__ = 37 UpperCAmelCase__ = 'gelu' UpperCAmelCase__ = 0.1 UpperCAmelCase__ = 0.1 UpperCAmelCase__ = 512 UpperCAmelCase__ = 16 UpperCAmelCase__ = 2 UpperCAmelCase__ = 0.02 UpperCAmelCase__ = 3 UpperCAmelCase__ = 4 UpperCAmelCase__ = 128 UpperCAmelCase__ = 2 UpperCAmelCase__ = 9 UpperCAmelCase__ = 1 UpperCAmelCase__ = None def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ = None if self.use_input_mask: UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ = None if self.use_token_type_ids: UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ = None UpperCAmelCase__ = None 
UpperCAmelCase__ = None if self.use_labels: UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple: """simple docstring""" UpperCAmelCase__ = TFConvBertModel(config=__a ) UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} UpperCAmelCase__ = [input_ids, input_mask] UpperCAmelCase__ = model(__a ) UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any: """simple docstring""" UpperCAmelCase__ = TFConvBertForMaskedLM(config=__a ) UpperCAmelCase__ = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = self.num_labels UpperCAmelCase__ = TFConvBertForSequenceClassification(config=__a ) 
UpperCAmelCase__ = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = self.num_choices UpperCAmelCase__ = TFConvBertForMultipleChoice(config=__a ) UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = self.num_labels UpperCAmelCase__ = TFConvBertForTokenClassification(config=__a ) UpperCAmelCase__ = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = TFConvBertForQuestionAnswering(config=__a ) UpperCAmelCase__ = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ (self ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = 
self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) = config_and_inputs UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) __SCREAMING_SNAKE_CASE = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def UpperCamelCase__ (self ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = TFConvBertModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 ) def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ (self ) -> str: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_multiple_choice(*__a ) def UpperCamelCase__ (self ) -> Any: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a ) def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__a ) def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ = True UpperCAmelCase__ = True if hasattr(__a , 'use_cache' ): UpperCAmelCase__ = True UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a ) for model_class in self.all_model_classes: UpperCAmelCase__ = self._prepare_for_class(__a , __a ) UpperCAmelCase__ = model_class(__a ) UpperCAmelCase__ = len(model(__a ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__a , saved_model=__a ) UpperCAmelCase__ = os.path.join(__a , 'saved_model' , '1' ) UpperCAmelCase__ = tf.keras.models.load_model(__a ) UpperCAmelCase__ = model(__a ) if self.is_encoder_decoder: UpperCAmelCase__ = outputs['encoder_hidden_states'] UpperCAmelCase__ = outputs['encoder_attentions'] else: UpperCAmelCase__ = outputs['hidden_states'] UpperCAmelCase__ = outputs['attentions'] self.assertEqual(len(__a ) , __a ) UpperCAmelCase__ = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__a ) , __a ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , 
[self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def UpperCamelCase__ (self ) -> Any: """simple docstring""" UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(__a ) def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ = True UpperCAmelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a ) UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a ) def check_decoder_attentions_output(__a ): UpperCAmelCase__ = len(__a ) self.assertEqual(out_len % 2 , 0 ) UpperCAmelCase__ = outputs.decoder_attentions self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(__a ): UpperCAmelCase__ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: UpperCAmelCase__ = True UpperCAmelCase__ = False UpperCAmelCase__ = model_class(__a ) UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) ) UpperCAmelCase__ = len(__a ) self.assertEqual(config.output_hidden_states , 
__a ) check_encoder_attentions_output(__a ) if self.is_encoder_decoder: UpperCAmelCase__ = model_class(__a ) UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) ) self.assertEqual(config.output_hidden_states , __a ) check_decoder_attentions_output(__a ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCAmelCase__ = True UpperCAmelCase__ = model_class(__a ) UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) ) self.assertEqual(config.output_hidden_states , __a ) check_encoder_attentions_output(__a ) # Check attention is always last and order is fine UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = model_class(__a ) UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) ) self.assertEqual(model.config.output_hidden_states , __a ) check_encoder_attentions_output(__a ) @require_tf class lowercase ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase__ (self ) -> int: """simple docstring""" UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase__ = model(__a )[0] UpperCAmelCase__ = [1, 6, 768] self.assertEqual(output.shape , __a ) UpperCAmelCase__ = tf.constant( [ [ [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32], [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24], [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
335
1
'''simple docstring''' from __future__ import annotations from collections.abc import Sequence from typing import Literal def UpperCAmelCase_ (__a : str , __a : str ): """simple docstring""" _a : Dict = list(__a ) _a : int = list(__a ) _a : Dict = 0 for i in range(len(__a ) ): if lista[i] != lista[i]: count += 1 _a : Any = '_' if count > 1: return False else: return "".join(__a ) def UpperCAmelCase_ (__a : list[str] ): """simple docstring""" _a : List[str] = [] while True: _a : Any = ['$'] * len(__a ) _a : Optional[int] = [] for i in range(len(__a ) ): for j in range(i + 1 , len(__a ) ): _a : Union[str, Any] = compare_string(binary[i] , binary[j] ) if k is False: _a : Any = '*' _a : int = '*' temp.append('X' ) for i in range(len(__a ) ): if checka[i] == "$": pi.append(binary[i] ) if len(__a ) == 0: return pi _a : List[Any] = list(set(__a ) ) def UpperCAmelCase_ (__a : int , __a : Sequence[float] ): """simple docstring""" _a : Dict = [] for minterm in minterms: _a : str = '' for _ in range(__a ): _a : Optional[int] = str(minterm % 2 ) + string minterm //= 2 temp.append(__a ) return temp def UpperCAmelCase_ (__a : str , __a : str , __a : int ): """simple docstring""" _a : Union[str, Any] = list(__a ) _a : Optional[Any] = list(__a ) _a : int = 0 for i in range(len(__a ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def UpperCAmelCase_ (__a : list[list[int]] , __a : list[str] ): """simple docstring""" _a : int = [] _a : List[Any] = [0] * len(__a ) for i in range(len(chart[0] ) ): _a : Any = 0 _a : Union[str, Any] = -1 for j in range(len(__a ) ): if chart[j][i] == 1: count += 1 _a : List[Any] = j if count == 1: _a : Any = 1 for i in range(len(__a ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(__a ) ): _a : Dict = 0 temp.append(prime_implicants[i] ) while True: _a : str = 0 _a : Union[str, Any] = -1 _a : Optional[Any] = 0 for i in range(len(__a ) ): _a : List[str] = chart[i].count(1 ) if count_n > 
max_n: _a : str = count_n _a : int = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(__a ) ): _a : Optional[int] = 0 def UpperCAmelCase_ (__a : list[str] , __a : list[str] ): """simple docstring""" _a : int = [[0 for x in range(len(__a ) )] for x in range(len(__a ) )] for i in range(len(__a ) ): _a : Dict = prime_implicants[i].count('_' ) for j in range(len(__a ) ): if is_for_table(prime_implicants[i] , binary[j] , __a ): _a : Union[str, Any] = 1 return chart def UpperCAmelCase_ (): """simple docstring""" _a : Dict = int(input('Enter the no. of variables\n' ) ) _a : str = [ float(__a ) for x in input( 'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split() ] _a : List[Any] = decimal_to_binary(__a , __a ) _a : Tuple = check(__a ) print('Prime Implicants are:' ) print(__a ) _a : Union[str, Any] = prime_implicant_chart(__a , __a ) _a : List[str] = selection(__a , __a ) print('Essential Prime Implicants are:' ) print(__a ) if __name__ == "__main__": import doctest doctest.testmod() main()
271
'''simple docstring''' from __future__ import annotations __lowerCAmelCase = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0] __lowerCAmelCase = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1] def UpperCAmelCase_ (__a : list[float] ): """simple docstring""" _a : Optional[int] = [] _a : int = len(__a ) for i in range(__a ): _a : float = -1 for j in range(i + 1 , __a ): if arr[i] < arr[j]: _a : Any = arr[j] break result.append(__a ) return result def UpperCAmelCase_ (__a : list[float] ): """simple docstring""" _a : Tuple = [] for i, outer in enumerate(__a ): _a : float = -1 for inner in arr[i + 1 :]: if outer < inner: _a : Dict = inner break result.append(__a ) return result def UpperCAmelCase_ (__a : list[float] ): """simple docstring""" _a : int = len(__a ) _a : list[float] = [] _a : list[float] = [-1] * arr_size for index in reversed(range(__a ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: _a : Dict = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) __lowerCAmelCase = ( """from __main__ import arr, next_greatest_element_slow, """ """next_greatest_element_fast, next_greatest_element""" ) print( """next_greatest_element_slow():""", timeit("""next_greatest_element_slow(arr)""", setup=setup), ) print( """next_greatest_element_fast():""", timeit("""next_greatest_element_fast(arr)""", setup=setup), ) print( """ next_greatest_element():""", timeit("""next_greatest_element(arr)""", setup=setup), )
271
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class snake_case_ ( __A ,unittest.TestCase ): __A : List[Any] = ShapEPipeline __A : Tuple = ["prompt"] __A : Any = ["prompt"] __A : Union[str, Any] = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] __A : Union[str, Any] = False @property def __UpperCamelCase ( self : Dict ) -> int: return 32 @property def __UpperCamelCase ( self : Union[str, Any] ) -> int: return 32 @property def __UpperCamelCase ( self : Dict ) -> Optional[int]: return self.time_input_dim * 4 @property def __UpperCamelCase ( self : List[str] ) -> Optional[int]: return 8 @property def __UpperCamelCase ( self : List[Any] ) -> Any: lowercase__ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: torch.manual_seed(0 ) lowercase__ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(lowercase_ ) @property def __UpperCamelCase ( self : Optional[int] ) -> Tuple: torch.manual_seed(0 ) lowercase__ : List[str] = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, 
"time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } lowercase__ : List[str] = PriorTransformer(**lowercase_ ) return model @property def __UpperCamelCase ( self : int ) -> str: torch.manual_seed(0 ) lowercase__ : Any = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } lowercase__ : List[Any] = ShapERenderer(**lowercase_ ) return model def __UpperCamelCase ( self : Any ) -> Tuple: lowercase__ : str = self.dummy_prior lowercase__ : int = self.dummy_text_encoder lowercase__ : Optional[Any] = self.dummy_tokenizer lowercase__ : Any = self.dummy_renderer lowercase__ : List[str] = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=lowercase_ , clip_sample=lowercase_ , clip_sample_range=1.0 , ) lowercase__ : Optional[int] = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[Any]=0 ) -> Tuple: if str(lowercase_ ).startswith("mps" ): lowercase__ : Dict = torch.manual_seed(lowercase_ ) else: lowercase__ : Dict = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) lowercase__ : Tuple = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def __UpperCamelCase ( self : Dict ) -> Any: lowercase__ : List[str] = "cpu" lowercase__ : List[str] = self.get_dummy_components() lowercase__ : Union[str, Any] = self.pipeline_class(**lowercase_ ) lowercase__ : Optional[int] = pipe.to(lowercase_ ) 
pipe.set_progress_bar_config(disable=lowercase_ ) lowercase__ : Optional[int] = pipe(**self.get_dummy_inputs(lowercase_ ) ) lowercase__ : Union[str, Any] = output.images[0] lowercase__ : int = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase__ : Optional[Any] = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCamelCase ( self : List[str] ) -> Dict: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __UpperCamelCase ( self : List[Any] ) -> List[str]: lowercase__ : List[str] = torch_device == "cpu" lowercase__ : Optional[int] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowercase_ , relax_max_difference=lowercase_ , ) def __UpperCamelCase ( self : Any ) -> Union[str, Any]: lowercase__ : Union[str, Any] = self.get_dummy_components() lowercase__ : Optional[Any] = self.pipeline_class(**lowercase_ ) lowercase__ : List[str] = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase__ : Any = 1 lowercase__ : Tuple = 2 lowercase__ : Tuple = self.get_dummy_inputs(lowercase_ ) for key in inputs.keys(): if key in self.batch_params: lowercase__ : Any = batch_size * [inputs[key]] lowercase__ : Any = pipe(**lowercase_ , num_images_per_prompt=lowercase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : List[Any] ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : Any ) -> List[str]: lowercase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" 
"/shap_e/test_shap_e_np_out.npy" ) lowercase__ : Union[str, Any] = ShapEPipeline.from_pretrained("openai/shap-e" ) lowercase__ : Optional[int] = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowercase__ : Optional[int] = torch.Generator(device=lowercase_ ).manual_seed(0 ) lowercase__ : Any = pipe( "a shark" , generator=lowercase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowercase_ , lowercase_ )
333
# Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def lowercase_ ( _lowerCamelCase : List[str]): return 1 / (1 + np.exp(-z)) def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple): return (-y * np.log(_lowerCamelCase) - (1 - y) * np.log(1 - h)).mean() def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple): lowercase__ : Union[str, Any] = np.dot(_lowerCamelCase , _lowerCamelCase) return np.sum(y * scores - np.log(1 + np.exp(_lowerCamelCase))) def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str=7_0000): lowercase__ : Optional[int] = np.zeros(x.shape[1]) for iterations in range(_lowerCamelCase): lowercase__ : Union[str, Any] = np.dot(_lowerCamelCase , _lowerCamelCase) lowercase__ : Tuple = sigmoid_function(_lowerCamelCase) lowercase__ : Dict = np.dot(x.T , h - y) / y.size lowercase__ : int = theta - alpha * gradient # updating the weights lowercase__ : List[str] = np.dot(_lowerCamelCase , _lowerCamelCase) lowercase__ : Union[str, Any] = sigmoid_function(_lowerCamelCase) lowercase__ : Optional[Any] = cost_function(_lowerCamelCase , _lowerCamelCase) if iterations % 100 == 0: print(f'''loss: {j} \t''') # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": UpperCamelCase = datasets.load_iris() UpperCamelCase = iris.data[:, :2] UpperCamelCase = (iris.target != 0) * 1 UpperCamelCase = 0.1 UpperCamelCase = logistic_reg(alpha, x, y, max_iterations=7_0000) print('''theta: ''', theta) # printing the theta i.e our weights vector def lowercase_ ( _lowerCamelCase : List[Any]): return sigmoid_function( np.dot(_lowerCamelCase , _lowerCamelCase)) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 
0][:, 1], color='''b''', label='''0''') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''') ((UpperCamelCase) , (UpperCamelCase)) = (x[:, 0].min(), x[:, 0].max()) ((UpperCamelCase) , (UpperCamelCase)) = (x[:, 1].min(), x[:, 1].max()) ((UpperCamelCase) , (UpperCamelCase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) UpperCamelCase = np.c_[xxa.ravel(), xxa.ravel()] UpperCamelCase = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''') plt.legend() plt.show()
333
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = "encoder-decoder" SCREAMING_SNAKE_CASE : List[Any] = True def __init__( self : Tuple , **_UpperCamelCase : List[str] ) ->Dict: super().__init__(**_UpperCamelCase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" snake_case_ = kwargs.pop('''encoder''' ) snake_case_ = encoder_config.pop('''model_type''' ) snake_case_ = kwargs.pop('''decoder''' ) snake_case_ = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig snake_case_ = AutoConfig.for_model(_UpperCamelCase , **_UpperCamelCase ) snake_case_ = AutoConfig.for_model(_UpperCamelCase , **_UpperCamelCase ) snake_case_ = True @classmethod def snake_case__( cls : int , _UpperCamelCase : PretrainedConfig , _UpperCamelCase : PretrainedConfig , **_UpperCamelCase : Any ) ->PretrainedConfig: logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) snake_case_ = True snake_case_ = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCamelCase ) def snake_case__( self : List[str] ) ->Union[str, Any]: snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.encoder.to_dict() snake_case_ = self.decoder.to_dict() snake_case_ = self.__class__.model_type return output
8
from ..utils import DummyObject, requires_backends class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ["note_seq"] def __init__( self : Optional[int] , *_UpperCamelCase : str , **_UpperCamelCase : Optional[int] ) ->Any: requires_backends(self , ['''note_seq'''] ) @classmethod def snake_case__( cls : int , *_UpperCamelCase : Any , **_UpperCamelCase : List[Any] ) ->int: requires_backends(cls , ['''note_seq'''] ) @classmethod def snake_case__( cls : Dict , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ) ->List[str]: requires_backends(cls , ['''note_seq'''] )
8
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json', 'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json', 'uclanlp/visualbert-vqa-coco-pre': ( 'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json' ), 'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json', 'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json', 'uclanlp/visualbert-vcr-coco-pre': ( 'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json' ), 'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json', 'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json', 'uclanlp/visualbert-nlvr2-coco-pre': ( 'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json' ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class lowercase__ ( __lowerCamelCase ): '''simple docstring''' a : List[str] = "visual_bert" def __init__( self, __magic_name__=30522, __magic_name__=768, __magic_name__=512, __magic_name__=12, __magic_name__=12, __magic_name__=3072, __magic_name__="gelu", __magic_name__=0.1, __magic_name__=0.1, __magic_name__=512, __magic_name__=2, __magic_name__=0.02, __magic_name__=1E-12, __magic_name__=False, __magic_name__=True, __magic_name__=1, __magic_name__=0, __magic_name__=2, **__magic_name__, ) -> Optional[Any]: """simple docstring""" super().__init__(pad_token_id=__magic_name__, bos_token_id=__magic_name__, eos_token_id=__magic_name__, **__magic_name__ ) UpperCamelCase__ : int = vocab_size UpperCamelCase__ : Optional[Any] = max_position_embeddings 
UpperCamelCase__ : Optional[int] = hidden_size UpperCamelCase__ : Optional[Any] = visual_embedding_dim UpperCamelCase__ : str = num_hidden_layers UpperCamelCase__ : Union[str, Any] = num_attention_heads UpperCamelCase__ : int = intermediate_size UpperCamelCase__ : Union[str, Any] = hidden_act UpperCamelCase__ : str = hidden_dropout_prob UpperCamelCase__ : Optional[int] = attention_probs_dropout_prob UpperCamelCase__ : Tuple = initializer_range UpperCamelCase__ : int = type_vocab_size UpperCamelCase__ : List[str] = layer_norm_eps UpperCamelCase__ : Optional[int] = bypass_transformer UpperCamelCase__ : Dict = special_visual_initialize
247
import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'): UpperCAmelCase_ = True from torch.cuda.amp import autocast UpperCAmelCase_ = logging.getLogger(__name__) @dataclass class lowercase__ : '''simple docstring''' a : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) a : Optional[str] = field( default=__lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) a : Optional[bool] = field( default=__lowerCamelCase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) a : Optional[bool] = field( default=__lowerCamelCase , metadata={"help": "Whether to log verbose messages or not."} , ) a : Optional[float] = field( default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} ) a : Optional[float] = field( default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} ) a : Optional[float] = field( default=0.9_9_9_9_9_5 , metadata={"help": "Decay of gumbel temperature during training."} ) def lowerCAmelCase_ ( __UpperCAmelCase: ModelArguments , __UpperCAmelCase: TrainingArguments ) -> Any: logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) UpperCamelCase__ : Tuple = logging.WARNING if 
model_args.verbose_logging: UpperCamelCase__ : List[Any] = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): UpperCamelCase__ : Dict = logging.INFO logger.setLevel(__UpperCAmelCase ) @dataclass class lowercase__ : '''simple docstring''' a : str = field( default=__lowerCamelCase , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) a : Optional[str] = field( default=__lowerCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) a : Optional[str] = field( default="train" , metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" } , ) a : Optional[str] = field( default="validation" , metadata={ "help": ( "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" ) } , ) a : Optional[str] = field( default="file" , metadata={"help": "Column in the dataset that contains speech file path. 
Defaults to 'file'"} , ) a : bool = field( default=__lowerCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) a : Optional[int] = field( default=1 , metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" } , ) a : Optional[int] = field( default=__lowerCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , ) a : Optional[float] = field( default=2_0.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} ) @dataclass class lowercase__ : '''simple docstring''' a : WavaVecaForPreTraining a : WavaVecaFeatureExtractor a : Union[bool, str] = "longest" a : Optional[int] = None a : Optional[int] = None def __call__( self, __magic_name__ ) -> Dict[str, torch.Tensor]: """simple docstring""" # reformat list to dict and set to pytorch format UpperCamelCase__ : List[Any] = self.feature_extractor.pad( __magic_name__, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='''pt''', ) UpperCamelCase__ : Dict = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] ) UpperCamelCase__ : Union[str, Any] = batch['''input_values'''].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula UpperCamelCase__ : List[str] = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to( torch.long ) UpperCamelCase__ : Dict = torch.zeros( (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['''input_values'''].device ) # these two operations makes sure that all values # before the output lengths indices are attended to UpperCamelCase__ : str = 1 UpperCamelCase__ : Union[str, Any] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices UpperCamelCase__ : Dict = 
_compute_mask_indices( (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__magic_name__, min_masks=2, ) return batch class lowercase__ ( __lowerCamelCase ): '''simple docstring''' def __init__( self, *__magic_name__, __magic_name__=1, __magic_name__=0, __magic_name__=1.0, **__magic_name__ ) -> Dict: """simple docstring""" super().__init__(*__magic_name__, **__magic_name__ ) UpperCamelCase__ : Any = 0 UpperCamelCase__ : List[Any] = max_gumbel_temp UpperCamelCase__ : List[str] = min_gumbel_temp UpperCamelCase__ : Any = gumbel_temp_decay def UpperCamelCase__ ( self, __magic_name__, __magic_name__ ) -> torch.Tensor: """simple docstring""" model.train() UpperCamelCase__ : str = self._prepare_inputs(__magic_name__ ) if self.use_amp: with autocast(): UpperCamelCase__ : Optional[Any] = self.compute_loss(__magic_name__, __magic_name__ ) else: UpperCamelCase__ : Tuple = self.compute_loss(__magic_name__, __magic_name__ ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": UpperCamelCase__ : Any = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": UpperCamelCase__ : List[str] = loss.sum() / (inputs['''mask_time_indices''']).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. 
Choose one of ['mean', 'sum']" ) if self.args.gradient_accumulation_steps > 1: UpperCamelCase__ : Tuple = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(__magic_name__ ).backward() elif self.use_apex: with amp.scale_loss(__magic_name__, self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(__magic_name__ ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) ) return loss.detach() def lowerCAmelCase_ ( ) -> str: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : str = parser.parse_args_into_dataclasses() configure_logger(__UpperCAmelCase , __UpperCAmelCase ) # Downloading and loading a dataset from the hub. 
UpperCamelCase__ : str = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" UpperCamelCase__ : Any = DatasetDict() UpperCamelCase__ : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , ) UpperCamelCase__ : Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" UpperCamelCase__ : int = DatasetDict() UpperCamelCase__ : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , ) UpperCamelCase__ : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported UpperCamelCase__ : str = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__UpperCAmelCase ) def prepare_dataset(__UpperCAmelCase: Union[str, Any] ): # check that all files have the correct sampling rate UpperCamelCase__ ,UpperCamelCase__ : List[str] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays UpperCamelCase__ : Any = datasets.map( __UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names ) # filter audio files that are too long UpperCamelCase__ : Tuple = vectorized_datasets.filter( lambda __UpperCAmelCase : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * 
feature_extractor.sampling_rate ) ) def normalize(__UpperCAmelCase: Optional[int] ): return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` UpperCamelCase__ : Any = vectorized_datasets.map( __UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 UpperCamelCase__ : int = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and''' ''' ``config.feat_extract_norm=\'layer\'''' ) UpperCamelCase__ : Optional[int] = WavaVecaForPreTraining(__UpperCAmelCase ) UpperCamelCase__ : List[str] = DataCollatorForWavaVecaPretraining(model=__UpperCAmelCase , feature_extractor=__UpperCAmelCase ) UpperCamelCase__ : List[Any] = WavaVecaPreTrainer( model=__UpperCAmelCase , data_collator=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=__UpperCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
247
1
'''simple docstring''' from __future__ import annotations from cmath import sqrt def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> tuple[complex, complex]: if a == 0: raise ValueError('''Coefficient \'a\' must not be zero.''' ) UpperCAmelCase : List[Any] = b * b - 4 * a * c UpperCAmelCase : int = (-b + sqrt(_lowerCAmelCase )) / (2 * a) UpperCAmelCase : List[Any] = (-b - sqrt(_lowerCAmelCase )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def snake_case_ ( ) -> Union[str, Any]: UpperCAmelCase , UpperCAmelCase : List[str] = quadratic_roots(a=5 , b=6 , c=1 ) print(f"""The solutions are: {solutiona} and {solutiona}""" ) if __name__ == "__main__": main()
23
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCAmelCase__ = logging.get_logger(__name__) enable_full_determinism() class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = UNetaDModel _snake_case : List[str] = 'sample' @property def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : List[Any] ): return (3, 32, 32) @property def lowerCAmelCase_ ( self : Optional[Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = { """block_out_channels""": (32, 64), """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""), """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""), """attention_head_dim""": 3, """out_channels""": 3, """in_channels""": 3, """layers_per_block""": 2, """sample_size""": 32, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = UNetaDModel _snake_case : Optional[Any] = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 4 _UpperCAmelCase = 4 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Optional[Any] ): return (4, 32, 32) @property def lowerCAmelCase_ ( self : 
Dict ): return (4, 32, 32) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = { """sample_size""": 32, """in_channels""": 4, """out_channels""": 4, """layers_per_block""": 2, """block_out_channels""": (32, 64), """attention_head_dim""": 32, """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""), """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""), } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model.to(__lowerCAmelCase ) _UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" ) def lowerCAmelCase_ ( self : str ): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase ) model_accelerate.to(__lowerCAmelCase ) model_accelerate.eval() _UpperCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) 
_UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained( """fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase ) model_normal_load.to(__lowerCAmelCase ) model_normal_load.eval() _UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" ) model.eval() model.to(__lowerCAmelCase ) _UpperCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) _UpperCAmelCase = noise.to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) ) class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = UNetaDModel _snake_case : str = 'sample' @property def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def lowerCAmelCase_ ( self : Any ): return 
(3, 32, 32) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return (3, 32, 32) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = { """block_out_channels""": [32, 64, 64, 64], """in_channels""": 3, """layers_per_block""": 1, """out_channels""": 3, """time_embedding_type""": """fourier""", """norm_eps""": 1e-6, """mid_block_scale_factor""": math.sqrt(2.0 ), """norm_num_groups""": None, """down_block_types""": [ """SkipDownBlock2D""", """AttnSkipDownBlock2D""", """SkipDownBlock2D""", """SkipDownBlock2D""", ], """up_block_types""": [ """SkipUpBlock2D""", """SkipUpBlock2D""", """AttnSkipUpBlock2D""", """SkipUpBlock2D""", ], } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) _UpperCAmelCase = self.dummy_input _UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase ) _UpperCAmelCase = noise _UpperCAmelCase = model(**__lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (256, 256) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] ) # fmt: on 
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" ) model.to(__lowerCAmelCase ) _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase ) with torch.no_grad(): _UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] ) # fmt: on self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) def lowerCAmelCase_ ( self : List[str] ): # not required for this model pass
289
0
def _a ( SCREAMING_SNAKE_CASE : str ): """simple docstring""" UpperCamelCase__ : Tuple = hex_num.strip() if not hex_num: raise ValueError('''No value was passed to the function''' ) UpperCamelCase__ : Optional[Any] = hex_num[0] == '''-''' if is_negative: UpperCamelCase__ : Any = hex_num[1:] try: UpperCamelCase__ : Optional[int] = int(SCREAMING_SNAKE_CASE , 16 ) except ValueError: raise ValueError('''Invalid value was passed to the function''' ) UpperCamelCase__ : Any = '''''' while int_num > 0: UpperCamelCase__ : str = str(int_num % 2 ) + bin_str int_num >>= 1 return int(('''-''' + bin_str) if is_negative else bin_str ) if __name__ == "__main__": import doctest doctest.testmod()
369
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __UpperCamelCase : List[Any] = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE : Union[tf.Tensor, np.ndarray] ): """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): return list(tensor.shape ) UpperCamelCase__ : List[Any] = tf.shape(SCREAMING_SNAKE_CASE ) if tensor.shape == tf.TensorShape(SCREAMING_SNAKE_CASE ): return dynamic UpperCamelCase__ : Optional[Any] = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(SCREAMING_SNAKE_CASE )] def _a ( SCREAMING_SNAKE_CASE : tf.Tensor , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[str] = None ): """simple docstring""" return tf.nn.softmax(logits=logits + 1E-9 , axis=SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int=1E-5 , SCREAMING_SNAKE_CASE : Any=-1 ): """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized UpperCamelCase__ , UpperCamelCase__ : Dict = tf.nn.moments(SCREAMING_SNAKE_CASE , axes=[axis] , keepdims=SCREAMING_SNAKE_CASE ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis UpperCamelCase__ : Tuple = [1] * inputs.shape.rank UpperCamelCase__ : List[str] = shape_list(SCREAMING_SNAKE_CASE )[axis] UpperCamelCase__ : Union[str, Any] = tf.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) UpperCamelCase__ : int = tf.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Compute layer normalization using the batch_normalization # function. 
UpperCamelCase__ : List[Any] = tf.nn.batch_normalization( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , offset=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , variance_epsilon=SCREAMING_SNAKE_CASE , ) return outputs def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple=0 , SCREAMING_SNAKE_CASE : Tuple=-1 ): """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input UpperCamelCase__ : Union[str, Any] = tf.shape(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) UpperCamelCase__ : Tuple = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE : tf.Tensor ): """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE , tf.Tensor ): UpperCamelCase__ : int = tf.convert_to_tensor(SCREAMING_SNAKE_CASE ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: UpperCamelCase__ : List[Any] = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: UpperCamelCase__ : Any = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) UpperCamelCase__ : Dict = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def _a ( SCREAMING_SNAKE_CASE : tf.Tensor , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str = "input_ids" ): """simple docstring""" tf.debugging.assert_less( SCREAMING_SNAKE_CASE , tf.cast(SCREAMING_SNAKE_CASE , dtype=tensor.dtype ) , message=( F"The maximum value of {tensor_name} ({tf.math.reduce_max(SCREAMING_SNAKE_CASE )}) must be smaller than the embedding " F"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time." ) , ) def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" UpperCamelCase__ : str = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. UpperCamelCase__ : List[str] = [x for x in data if len(SCREAMING_SNAKE_CASE ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' F"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " F"bytes: {bad_attributes}" ) UpperCamelCase__ : Optional[Any] = np.asarray(SCREAMING_SNAKE_CASE ) UpperCamelCase__ : Any = 1 UpperCamelCase__ : Tuple = np.array_split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # This will never loop forever thanks to the test above. 
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 UpperCamelCase__ : Any = np.array_split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(SCREAMING_SNAKE_CASE ): UpperCamelCase__ : Optional[int] = chunk_data else: UpperCamelCase__ : List[Any] = data def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int ): """simple docstring""" if name in group.attrs: UpperCamelCase__ : List[Any] = [n.decode('''utf8''' ) if hasattr(SCREAMING_SNAKE_CASE , '''decode''' ) else n for n in group.attrs[name]] else: UpperCamelCase__ : Optional[int] = [] UpperCamelCase__ : Union[str, Any] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(SCREAMING_SNAKE_CASE , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def _a ( SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" def _expand_single_ad_tensor(SCREAMING_SNAKE_CASE : str ): if isinstance(SCREAMING_SNAKE_CASE , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(SCREAMING_SNAKE_CASE , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , SCREAMING_SNAKE_CASE )
51
0
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
95
"""ConvNeXt V2 model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config files.
UpperCAmelCase = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class lowerCAmelCase__(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a ConvNeXt V2 model / backbone.

    FIX(review): the original declared every ``__init__`` parameter with the
    same obfuscated name (``__SCREAMING_SNAKE_CASE``) — a SyntaxError — and
    inherited from the undefined name ``a`` twice.  Real parameter names are
    restored from the public ConvNextV2Config API; the bases are the mixin
    and base config imported at the top of this module.
    """

    # Model-type identifier used by the Auto* classes.
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Store all hyper-parameters and derive the backbone stage metadata."""
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Per-stage widths / depths fall back to the "tiny" architecture.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # Backbone stage names: the stem followed by one entry per stage.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
267
0
"""Protein data type and conversion utilities (AlphaFold/OpenFold port).

Represents a protein structure as numpy arrays and provides parsing from the
ProteinNet text format plus serialization to PDB-format strings.
"""

import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants

FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

# ProteinNet stores coordinates in picometers; PDB uses angstroms.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parse a ProteinNet-formatted record into a `Protein`.

    Reads the [PRIMARY] (sequence), [TERTIARY] (N/CA/C coordinates, in
    picometers) and [MASK] sections. Raises AssertionError if no [PRIMARY]
    section is present.
    """
    tag_re = r"(\[[A-Z]+\]\n)"
    tags: List[str] = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    # Pair each "[TAG]" with the lines of its section body.
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            # Bug fix: the original assigned seq[i] = "X" on a str, which raises
            # TypeError (strings are immutable). Work on a list of characters.
            seq = list(g[1][0].strip())
            for i, res_symbol in enumerate(seq):
                if res_symbol not in residue_constants.restypes:
                    seq[i] = "X"
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                # Coordinates are interleaved per backbone atom (N, CA, C).
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    """Return REMARK/PARENT header lines for `chain_id` of `prot`."""
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents belonging to the requested chain.
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain
    recycling.
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by chain index, filling gaps with "N/A".
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            # A new chain starts after each TER: emit its PARENT header.
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string.

    Raises ValueError if any aatype is out of range.
    """
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask: which atoms should exist for each residue
    type, irrespective of whether they were observed.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a `Protein` from model features and prediction output.

    `residue_index` is shifted by 1 to match PDB's 1-based convention; missing
    b-factors default to zeros shaped like the final atom mask.
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
358
"""Tests for `AutoTokenizer` and the tokenizer auto-mapping machinery.

NOTE(review): the original (obfuscated) version named every test method
`lowercase_` — so later definitions shadowed earlier ones and unittest
discovered none of them — and passed the undefined name `_A` as arguments.
Restored descriptive `test_*` names and concrete arguments.
"""

import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        # Never ask for confirmation when fetching remote code in tests.
        # NOTE(review): the original assignment target was obfuscated away;
        # this matches the upstream test file — confirm against it.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30_000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer(self):
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")

        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
299
0
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file UpperCAmelCase_ : str = TapasConfig.from_json_file(__lowerCamelCase ) # set absolute/relative position embeddings parameter UpperCAmelCase_ : Union[str, Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": UpperCAmelCase_ : str = TapasForQuestionAnswering(config=__lowerCamelCase ) elif task == "WTQ": # run_task_main.py hparams UpperCAmelCase_ : Union[str, Any] = 4 UpperCAmelCase_ : int = True # hparam_utils.py hparams UpperCAmelCase_ : Optional[Any] = 0.66_4694 UpperCAmelCase_ : Tuple = 0.20_7951 UpperCAmelCase_ : Dict = 0.12_1194 UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : int = False UpperCAmelCase_ : str = 0.035_2513 UpperCAmelCase_ : List[Any] = TapasForQuestionAnswering(config=__lowerCamelCase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams UpperCAmelCase_ : List[str] = 4 UpperCAmelCase_ : List[str] = False # hparam_utils.py hparams UpperCAmelCase_ : List[Any] = 36.4519 UpperCAmelCase_ : int = 0.90_3421 UpperCAmelCase_ : Union[str, Any] = 222.088 UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : str = True UpperCAmelCase_ : str = True UpperCAmelCase_ : Tuple = 0.76_3141 UpperCAmelCase_ : Dict = TapasForQuestionAnswering(config=__lowerCamelCase ) elif task == "TABFACT": UpperCAmelCase_ : List[Any] = 
TapasForSequenceClassification(config=__lowerCamelCase ) elif task == "MLM": UpperCAmelCase_ : Optional[Any] = TapasForMaskedLM(config=__lowerCamelCase ) elif task == "INTERMEDIATE_PRETRAINING": UpperCAmelCase_ : int = TapasModel(config=__lowerCamelCase ) else: raise ValueError(f"""Task {task} not supported.""" ) print(f"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Save pytorch-model (weights and configuration) print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__lowerCamelCase ) # Save tokenizer files print(f"""Save tokenizer files to {pytorch_dump_path}""" ) UpperCAmelCase_ : Union[str, Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512 ) tokenizer.save_pretrained(__lowerCamelCase ) print("Used relative position embeddings:", model.config.reset_position_index_per_cell ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.' ) parser.add_argument( '--reset_position_index_per_cell', default=False, action='store_true', help='Whether to use relative position embeddings or not. Defaults to True.', ) parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--tapas_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained TAPAS model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) _a = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
61
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration UpperCAmelCase__ = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def A ( _UpperCAmelCase : Optional[int] ) -> str: '''simple docstring''' _UpperCAmelCase = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": 
".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def A ( _UpperCAmelCase : Dict ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase = list(s_dict.keys() ) for key in keys: _UpperCAmelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: _UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase ) print(F"{key} -> {new_key}" ) _UpperCAmelCase = s_dict.pop(_UpperCAmelCase ) return s_dict def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape _UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) _UpperCAmelCase = emb.weight.data return lin_layer def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes: '''simple docstring''' os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) _UpperCAmelCase = os.path.basename(_UpperCAmelCase ) _UpperCAmelCase = url.split('/' )[-2] _UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ): raise RuntimeError(F"{download_target} exists and is not a regular file" ) if os.path.isfile(_UpperCAmelCase ): _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading 
the file" ) with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop: while True: _UpperCAmelCase = source.read(8_192 ) if not buffer: break output.write(_UpperCAmelCase ) loop.update(len(_UpperCAmelCase ) ) _UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read() if hashlib.shaaaa(_UpperCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' ) return model_bytes def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]: '''simple docstring''' if ".pt" not in checkpoint_path: _UpperCAmelCase = _download(_MODELS[checkpoint_path] ) else: _UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' ) _UpperCAmelCase = original_checkpoint['dims'] _UpperCAmelCase = original_checkpoint['model_state_dict'] _UpperCAmelCase = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(_UpperCAmelCase ) rename_keys(_UpperCAmelCase ) _UpperCAmelCase = True _UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0] _UpperCAmelCase = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , ) _UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= { 
"encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F" but all the following weights are missing {missing}" ) if tie_embeds: _UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCAmelCase = proj_out_weights model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
339
0
from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class _lowerCamelCase : """simple docstring""" def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Dict: '''simple docstring''' raise NotImplementedError() def _snake_case ( self )->Union[str, Any]: '''simple docstring''' raise NotImplementedError() class _lowerCamelCase ( A_ ): """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE )->str: '''simple docstring''' A_ : str = tokenizer A_ : List[Any] = skip_prompt A_ : List[str] = decode_kwargs # variables used in the streaming process A_ : Tuple = [] A_ : List[str] = 0 A_ : Optional[int] = True def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Optional[int]: '''simple docstring''' if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('''TextStreamer only supports batch size 1''' ) elif len(value.shape ) > 1: A_ : Dict = value[0] if self.skip_prompt and self.next_tokens_are_prompt: A_ : str = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) A_ : Union[str, Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('''\n''' ): A_ : Dict = text[self.print_len :] A_ : Dict = [] A_ : Optional[Any] = 0 # If the last token is a CJK character, we print the characters. elif len(snake_case__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ): A_ : str = text[self.print_len :] self.print_len += len(snake_case__ ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) 
else: A_ : int = text[self.print_len : text.rfind(''' ''' ) + 1] self.print_len += len(snake_case__ ) self.on_finalized_text(snake_case__ ) def _snake_case ( self )->List[Any]: '''simple docstring''' if len(self.token_cache ) > 0: A_ : Dict = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) A_ : int = text[self.print_len :] A_ : Tuple = [] A_ : Dict = 0 else: A_ : Tuple = "" A_ : List[str] = True self.on_finalized_text(snake_case__ , stream_end=snake_case__ ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False )->Tuple: '''simple docstring''' print(snake_case__ , flush=snake_case__ , end='''''' if not stream_end else None ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Optional[int]: '''simple docstring''' if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False class _lowerCamelCase ( A_ ): """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE )->List[Any]: '''simple docstring''' super().__init__(snake_case__ , snake_case__ , **snake_case__ ) A_ : Tuple = Queue() A_ : Dict = None A_ : Dict = timeout def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False )->List[str]: '''simple docstring''' self.text_queue.put(snake_case__ , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self )->Tuple: '''simple docstring''' return self def _snake_case ( self )->int: '''simple docstring''' A_ : Any = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
360
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowerCamelCase : """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , )->Any: '''simple docstring''' A_ : List[Any] = parent A_ : int = batch_size A_ : str = seq_length A_ : int = is_training A_ : Any = use_token_type_ids A_ : Union[str, Any] = use_labels A_ : Any = vocab_size A_ : Dict = hidden_size A_ : Dict = num_hidden_layers A_ : int = num_attention_heads A_ : Optional[Any] = intermediate_size A_ : Dict = hidden_act A_ : List[str] = hidden_dropout_prob A_ : List[Any] = attention_probs_dropout_prob A_ : Union[str, Any] = max_position_embeddings A_ : Optional[int] = type_vocab_size A_ : str = type_sequence_label_size A_ : Tuple = initializer_range A_ : Union[str, Any] = num_labels A_ : List[str] = num_choices A_ : Union[str, Any] = scope A_ : Any = self.vocab_size - 1 def _snake_case ( self )->Any: '''simple 
docstring''' A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Any = None if self.use_token_type_ids: A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : str = None A_ : Union[str, Any] = None A_ : Optional[int] = None if self.use_labels: A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) A_ : Optional[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) A_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->Tuple: '''simple docstring''' A_ : int = OpenAIGPTModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() A_ : int = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE ) A_ : Tuple = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) A_ : Any = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->List[str]: '''simple docstring''' A_ : int = OpenAIGPTLMHeadModel(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() A_ : Tuple = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) 
self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->Optional[int]: '''simple docstring''' A_ : List[Any] = OpenAIGPTDoubleHeadsModel(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() A_ : str = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )->str: '''simple docstring''' A_ : Any = self.num_labels A_ : List[Any] = OpenAIGPTForSequenceClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() A_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self )->int: '''simple docstring''' A_ : Dict = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Optional[int] = config_and_inputs A_ : int = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class _lowerCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" snake_case = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case = ( (OpenAIGPTLMHeadModel,) if 
is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Dict: '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )->Optional[int]: '''simple docstring''' A_ : Optional[Any] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": A_ : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , ) A_ : List[Any] = inputs_dict['''labels'''] A_ : Any = inputs_dict['''labels'''] A_ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , ) A_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) return inputs_dict def _snake_case ( self )->Any: '''simple docstring''' A_ : Any = OpenAIGPTModelTester(self ) A_ : int = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , n_embd=37 ) def _snake_case ( self )->Optional[int]: '''simple docstring''' 
self.config_tester.run_common_tests() def _snake_case ( self )->Tuple: '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->List[Any]: '''simple docstring''' A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Any: '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Tuple: '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_SCREAMING_SNAKE_CASE ) @slow def _snake_case ( self )->List[str]: '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[int] = OpenAIGPTModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @require_torch class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @slow def _snake_case ( self )->Tuple: '''simple docstring''' A_ : Optional[int] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' ) model.to(_SCREAMING_SNAKE_CASE ) A_ : Optional[int] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) # the president is A_ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 4_0477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the A_ : Dict = model.generate(_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE ) self.assertListEqual(output_ids[0].tolist() , _SCREAMING_SNAKE_CASE )
65
0
import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) __snake_case = logging.getLogger() def _lowercase ( UpperCamelCase_ ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase_ , 'all_results.json' ) if os.path.exists(UpperCamelCase_ ): with open(UpperCamelCase_ , 'r' ) as f: SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase_ ) else: raise ValueError(F'can\'t find {path}' ) return results __snake_case = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class lowercase__ ( _UpperCAmelCase ): def A_ ( self : str ): import xla_spawn SCREAMING_SNAKE_CASE__ = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE__ = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split() with patch.object(UpperCAmelCase_ , 'argv' , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE__ = time() xla_spawn.main() SCREAMING_SNAKE_CASE__ = time() SCREAMING_SNAKE_CASE__ = get_results(UpperCAmelCase_ ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. 
self.assertLess(end - start , 500 ) def A_ ( self : List[Any] ): import xla_spawn SCREAMING_SNAKE_CASE__ = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split() with patch.object(UpperCAmelCase_ , 'argv' , UpperCAmelCase_ ): xla_spawn.main()
176
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __snake_case = logging.get_logger(__name__) __snake_case = TypeVar("""DatasetType""", Dataset, IterableDataset) def _lowercase ( UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = "first_exhausted" , ) -> DatasetType: '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase_ ): if not isinstance(UpperCamelCase_ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase_ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( F'Dataset at position {i} has at least one split: {list(UpperCamelCase_ )}\n' F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(UpperCamelCase_ ) )}\']' ) raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase_ ).__name__}.' 
) if i == 0: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError( F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , info=UpperCamelCase_ , split=UpperCamelCase_ , stopping_strategy=UpperCamelCase_ ) else: return _interleave_iterable_datasets( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , info=UpperCamelCase_ , split=UpperCamelCase_ , stopping_strategy=UpperCamelCase_ ) def _lowercase ( UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 0 , ) -> DatasetType: '''simple docstring''' if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(UpperCamelCase_ ): if not isinstance(UpperCamelCase_ , (Dataset, IterableDataset) ): if isinstance(UpperCamelCase_ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' 'is an empty dataset dictionary.' ) raise ValueError( F'Dataset at position {i} has at least one split: {list(UpperCamelCase_ )}\n' F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(UpperCamelCase_ ) )}\']' ) raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase_ ).__name__}.' 
) if i == 0: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = ( (Dataset, IterableDataset) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError( F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(UpperCamelCase_ , info=UpperCamelCase_ , split=UpperCamelCase_ , axis=UpperCamelCase_ ) else: return _concatenate_iterable_datasets(UpperCamelCase_ , info=UpperCamelCase_ , split=UpperCamelCase_ , axis=UpperCamelCase_ )
176
1
'''simple docstring''' import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def __UpperCAmelCase ( a_: str, a_: Dict, a_: Optional[int], a_: int ): _UpperCAmelCase : str = s.rsplit(a_, a_ ) return new.join(a_ ) def __UpperCAmelCase ( a_: Union[str, Any] ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def __UpperCAmelCase ( a_: Union[str, Any] ): _UpperCAmelCase : Optional[Any] = {} _UpperCAmelCase : Dict = ["group_1", "group_2", "group_3", "group_4"] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: _UpperCAmelCase : Optional[Any] = key.replace(f"""{group_key}.""", f"""{group_key}.group.""" ) if "res_path" in key: _UpperCAmelCase : Optional[int] = key.replace("res_path.", "res_path.path." ) if key.endswith(".w" ): _UpperCAmelCase : Optional[int] = rreplace(a_, ".w", ".weight", 1 ) if key.endswith(".b" ): _UpperCAmelCase : Any = rreplace(a_, ".b", ".bias", 1 ) _UpperCAmelCase : Optional[Any] = value.float() return upgrade @torch.no_grad() def __UpperCAmelCase ( a_: Tuple, a_: Dict, a_: Optional[int]=None, a_: Optional[int]=True ): from dall_e import Encoder _UpperCAmelCase : Dict = Encoder() if os.path.exists(a_ ): _UpperCAmelCase : Any = torch.load(a_ ) else: _UpperCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(a_ ) if isinstance(a_, a_ ): _UpperCAmelCase : List[Any] = ckpt.state_dict() encoder.load_state_dict(a_ ) if config_path is not None: _UpperCAmelCase : Optional[int] = FlavaImageCodebookConfig.from_pretrained(a_ ) else: _UpperCAmelCase : List[str] = FlavaImageCodebookConfig() _UpperCAmelCase : int = FlavaImageCodebook(a_ ).eval() _UpperCAmelCase : str = encoder.state_dict() _UpperCAmelCase : Optional[int] = upgrade_state_dict(a_ ) hf_model.load_state_dict(a_ ) _UpperCAmelCase : List[str] = hf_model.state_dict() _UpperCAmelCase : 
Optional[Any] = count_parameters(a_ ) _UpperCAmelCase : Optional[Any] = count_parameters(a_ ) assert torch.allclose(a_, a_, atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(a_ ) else: return hf_state_dict if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') __a = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
17
'''simple docstring''' import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class A__ ( UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : str = BarthezTokenizer UpperCamelCase_ : List[Any] = BarthezTokenizerFast UpperCamelCase_ : Optional[int] = True UpperCamelCase_ : Optional[int] = True def _lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" super().setUp() _UpperCAmelCase : Tuple = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = tokenizer def _lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Tuple = "<pad>" _UpperCAmelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(lowerCAmelCase__ ) , 1_0_1_1_2_2 ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 ) @require_torch def _lowerCAmelCase ( self : Any ) -> int: """simple docstring""" _UpperCAmelCase : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] 
_UpperCAmelCase : Optional[int] = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2] _UpperCAmelCase : int = self.tokenizer( lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) _UpperCAmelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" if not self.test_rust_tokenizer: return _UpperCAmelCase : Optional[int] = self.get_tokenizer() _UpperCAmelCase : Optional[int] = self.get_rust_tokenizer() _UpperCAmelCase : Tuple = "I was born in 92000, and this is falsé." _UpperCAmelCase : Dict = tokenizer.tokenize(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() _UpperCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def _lowerCAmelCase ( self : int ) -> int: """simple docstring""" _UpperCAmelCase : Optional[Any] = {"input_ids": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _UpperCAmelCase : Tuple = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=lowerCAmelCase__ , )
17
1
"""simple docstring""" def lowercase ( A_ )-> int: '''simple docstring''' return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print("""Program to check whether a number is a Perfect number or not...""") __lowercase = int(input("""Enter number: """).strip()) print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
40
"""Tokenization class for M2M100: a SentencePiece tokenizer with per-language
prefix tokens appended after the regular vocabulary."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"],
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for M2M100.

    Sequences are built as ``[lang_code_id] tokens [eos]``; the language prefix
    is switched via :attr:`src_lang` / :attr:`tgt_lang`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        # Register every language token as an additional special token (skipping
        # any the caller already provided).
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        # Language tokens get ids directly after the regular vocabulary.
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        # Regular SentencePiece vocabulary plus the appended language tokens.
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        # Language tokens live outside the SentencePiece vocabulary.
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens (lang prefix / eos) and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding the language prefix and eos suffix."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            # No on-disk model available: serialize the in-memory model instead.
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """Load a SentencePiece model from *path* with the given constructor kwargs."""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    """Read a JSON file and return the parsed object."""
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    """Write *data* to *path* as pretty-printed JSON."""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
223
0
"""Time Series Transformer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    """Configuration for a probabilistic Time Series Transformer model.

    Holds the time-series-specific settings (prediction/context lengths, lags,
    static/dynamic feature counts, output distribution) and the encoder/decoder
    Transformer hyper-parameters.
    """

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # Fall back to the prediction length when no explicit context length is given.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: half the cardinality (plus one), capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Total width of the per-timestep feature vector fed to the Transformer.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
122
"""Tests for the Kandinsky 2.2 inpainting pipeline (fast dummy-model tests plus
a slow GPU integration test against reference outputs)."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests using small randomly-initialized dummy components."""

    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        # NOTE(review): reconstructed — the upstream test zeroes the top-left quadrant.
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # NOTE(review): reconstructed — the upstream test masks out a band at the top.
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
122
1
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices ``[i, j]`` (i < j) with ``nums[i] + nums[j] == target``.

    Uses the classic two-pointer scan, so *nums* must be sorted in
    non-decreasing order.  Returns an empty list when no pair sums to target.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 17)
    [0, 3]
    >>> two_pointer([1, 2, 3], 7)
    []
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i += 1  # sum too small -> advance the left pointer
        else:
            j -= 1  # sum too large -> retreat the right pointer

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
119
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''t5''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , lowerCAmelCase_ : List[Any]=32_128 , lowerCAmelCase_ : Tuple=512 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : List[str]=2_048 , lowerCAmelCase_ : Tuple=6 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Dict=128 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=1e-6 , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=1 , **lowerCAmelCase_ : Optional[int] , ) -> int: UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : Optional[Any] = d_model UpperCAmelCase_ : str = d_kv UpperCAmelCase_ : Any = d_ff UpperCAmelCase_ : int = num_layers UpperCAmelCase_ : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry UpperCAmelCase_ : Optional[Any] = num_heads UpperCAmelCase_ : Any = relative_attention_num_buckets UpperCAmelCase_ : Optional[Any] = relative_attention_max_distance UpperCAmelCase_ : Optional[Any] = 
dropout_rate UpperCAmelCase_ : Tuple = layer_norm_epsilon UpperCAmelCase_ : int = initializer_factor UpperCAmelCase_ : int = feed_forward_proj UpperCAmelCase_ : str = use_cache UpperCAmelCase_ : Tuple = self.feed_forward_proj.split("-" ) UpperCAmelCase_ : List[Any] = act_info[-1] UpperCAmelCase_ : Optional[int] = act_info[0] == "gated" if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": UpperCAmelCase_ : int = "gelu_new" super().__init__( pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , ) class UpperCamelCase_ (__A ): @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase_ : Any = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: UpperCAmelCase_ : List[Any] = "past_encoder_sequence + sequence" UpperCAmelCase_ : Union[str, Any] = {0: "batch"} UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"} UpperCAmelCase_ : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" ) return common_inputs @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: return 13
268
0
"""simple docstring""" from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
362
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE : List[str] = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
24
0
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _UpperCamelCase = logging.getLogger(__name__) @dataclass class lowerCamelCase_ : """simple docstring""" a_ =field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a_ =field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a_ =field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) a_ =field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a_ =field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. a_ =field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class lowerCamelCase_ : """simple docstring""" a_ =field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) a_ =field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used."""} , ) a_ =field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a_ =field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def a_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) __lowerCamelCase : List[str] = import_module('tasks' ) try: __lowerCamelCase : Optional[int] = getattr(_lowerCAmelCase ,model_args.task_type ) __lowerCamelCase : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
' F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' ,_lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __lowerCamelCase : Any = token_classification_task.get_labels(data_args.labels ) __lowerCamelCase : Dict[int, str] = dict(enumerate(_lowerCAmelCase ) ) __lowerCamelCase : Optional[Any] = len(_lowerCAmelCase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCamelCase : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=_lowerCAmelCase ,idalabel=_lowerCAmelCase ,labelaid={label: i for i, label in enumerate(_lowerCAmelCase )} ,cache_dir=model_args.cache_dir ,) __lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast ,) __lowerCamelCase : Optional[int] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=_lowerCAmelCase ,cache_dir=model_args.cache_dir ,) # Get datasets __lowerCamelCase : Any = ( TokenClassificationDataset( token_classification_task=_lowerCAmelCase ,data_dir=data_args.data_dir ,tokenizer=_lowerCAmelCase ,labels=_lowerCAmelCase ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,) if training_args.do_train else None ) __lowerCamelCase : Any = ( TokenClassificationDataset( token_classification_task=_lowerCAmelCase ,data_dir=data_args.data_dir ,tokenizer=_lowerCAmelCase ,labels=_lowerCAmelCase ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,) if training_args.do_eval else None ) def align_predictions(_lowerCAmelCase ,_lowerCAmelCase ) -> Tuple[List[int], List[int]]: __lowerCamelCase : List[str] = np.argmax(_lowerCAmelCase ,axis=2 ) __lowerCamelCase ,__lowerCamelCase : Tuple = preds.shape __lowerCamelCase : Tuple = [[] for _ in range(_lowerCAmelCase )] __lowerCamelCase : Any = [[] for _ in range(_lowerCAmelCase )] for i in range(_lowerCAmelCase ): for j in range(_lowerCAmelCase ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) 
preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(_lowerCAmelCase ) -> Dict: __lowerCamelCase ,__lowerCamelCase : List[str] = align_predictions(p.predictions ,p.label_ids ) return { "accuracy_score": accuracy_score(_lowerCAmelCase ,_lowerCAmelCase ), "precision": precision_score(_lowerCAmelCase ,_lowerCAmelCase ), "recall": recall_score(_lowerCAmelCase ,_lowerCAmelCase ), "f1": fa_score(_lowerCAmelCase ,_lowerCAmelCase ), } # Data collator __lowerCamelCase : Any = DataCollatorWithPadding(_lowerCAmelCase ,pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __lowerCamelCase : int = Trainer( model=_lowerCAmelCase ,args=_lowerCAmelCase ,train_dataset=_lowerCAmelCase ,eval_dataset=_lowerCAmelCase ,compute_metrics=_lowerCAmelCase ,data_collator=_lowerCAmelCase ,) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowerCamelCase : Dict = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __lowerCamelCase : List[Any] = trainer.evaluate() __lowerCamelCase : List[str] = os.path.join(training_args.output_dir ,'eval_results.txt' ) if trainer.is_world_process_zero(): with open(_lowerCAmelCase ,'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' ,_lowerCAmelCase ,_lowerCAmelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(_lowerCAmelCase ) # Predict if training_args.do_predict: __lowerCamelCase : List[Any] = TokenClassificationDataset( token_classification_task=_lowerCAmelCase ,data_dir=data_args.data_dir ,tokenizer=_lowerCAmelCase 
,labels=_lowerCAmelCase ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.test ,) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase : List[Any] = trainer.predict(_lowerCAmelCase ) __lowerCamelCase ,__lowerCamelCase : Optional[Any] = align_predictions(_lowerCAmelCase ,_lowerCAmelCase ) __lowerCamelCase : Optional[Any] = os.path.join(training_args.output_dir ,'test_results.txt' ) if trainer.is_world_process_zero(): with open(_lowerCAmelCase ,'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' ,_lowerCAmelCase ,_lowerCAmelCase ) writer.write('%s = %s\n' % (key, value) ) # Save predictions __lowerCamelCase : Tuple = os.path.join(training_args.output_dir ,'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(_lowerCAmelCase ,'w' ) as writer: with open(os.path.join(data_args.data_dir ,'test.txt' ) ,'r' ) as f: token_classification_task.write_predictions_to_file(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) return results def a_ ( _lowerCAmelCase ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
208
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _UpperCamelCase = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['SpeechEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['FlaxSpeechEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
208
1
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> bool: __snake_case : Union[str, Any] = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(2_7)) print(perfect_cube(4))
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int = 1_00 ) -> int: __snake_case : Any = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Union[str, Any] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
0
1
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class UpperCAmelCase_ ( _a ): """simple docstring""" def lowerCamelCase ( self : List[Any] ): snake_case__ : str = tempfile.mkdtemp() snake_case__ : Tuple = 5 # Realm tok snake_case__ : Union[str, Any] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """test""", """question""", """this""", """is""", """the""", """first""", """second""", """third""", """fourth""", """fifth""", """record""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] snake_case__ : Optional[int] = os.path.join(self.tmpdirname , """realm_tokenizer""" ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) snake_case__ : Union[str, Any] = os.path.join(snake_case_ , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) snake_case__ : List[Any] = os.path.join(self.tmpdirname , """realm_block_records""" ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) def lowerCamelCase ( self : List[str] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) ) def lowerCamelCase ( self : str ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase ( self : int ): snake_case__ : List[Any] = RealmConfig(num_block_records=self.num_block_records ) return config def lowerCamelCase ( self : int ): snake_case__ : str = Dataset.from_dict( { """id""": ["""0""", """1"""], """question""": ["""foo""", """bar"""], """answers""": [["""Foo""", 
"""Bar"""], ["""Bar"""]], } ) return dataset def lowerCamelCase ( self : List[str] ): snake_case__ : str = np.array( [ b"""This is the first record""", b"""This is the second record""", b"""This is the third record""", b"""This is the fourth record""", b"""This is the fifth record""", b"""This is a longer longer longer record""", ] , dtype=snake_case_ , ) return block_records def lowerCamelCase ( self : int ): snake_case__ : Dict = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def lowerCamelCase ( self : int ): snake_case__ : Tuple = self.get_config() snake_case__ : Any = self.get_dummy_retriever() snake_case__ : str = retriever.tokenizer snake_case__ : Optional[int] = np.array([0, 3] , dtype="""long""" ) snake_case__ : List[Any] = tokenizer(["""Test question"""] ).input_ids snake_case__ : List[str] = tokenizer( ["""the fourth"""] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids snake_case__ : List[Any] = config.reader_seq_len snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = retriever( snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors="""np""" ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", 
"""this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , ) def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : List[str] = self.get_config() snake_case__ : List[str] = self.get_dummy_retriever() snake_case__ : str = retriever.tokenizer snake_case__ : List[Any] = np.array([0, 3, 5] , dtype="""long""" ) snake_case__ : Union[str, Any] = tokenizer(["""Test question"""] ).input_ids snake_case__ : Optional[int] = tokenizer( ["""the fourth""", """longer longer"""] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids snake_case__ : Any = config.reader_seq_len snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = retriever( snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors="""np""" ) self.assertEqual([False, True, True] , snake_case_ ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , snake_case_ ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : List[str] = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) # Test local path snake_case__ : Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) self.assertEqual(retriever.block_records[0] , b"""This is the first record""" ) # Test mocked remote path with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download: snake_case__ : Tuple = os.path.join( os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME ) snake_case__ : List[str] = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" ) self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
35
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , 
objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
0
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowercase__ = get_tests_dir("""fixtures""") lowercase__ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") lowercase__ = get_tests_dir("""fixtures/dummy-config.json""") class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : str = 0 def A_ ( self ): _lowerCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' ) self.assertIsInstance(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : Tuple = AutoFeatureExtractor.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) def A_ ( self ): with tempfile.TemporaryDirectory() as tmpdirname: _lowerCamelCase : int = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally _lowerCamelCase : str = AutoFeatureExtractor.from_pretrained(lowercase ).to_dict() config_dict.pop('feature_extractor_type' ) _lowerCamelCase : List[str] = WavaVecaFeatureExtractor(**lowercase ) # save in new folder model_config.save_pretrained(lowercase ) config.save_pretrained(lowercase ) _lowerCamelCase : List[Any] = AutoFeatureExtractor.from_pretrained(lowercase ) # make sure private variable is not incorrectly saved _lowerCamelCase : Union[str, Any] = json.loads(config.to_json_string() ) self.assertTrue('_processor_class' not in dict_as_saved ) self.assertIsInstance(lowercase , 
lowercase ) def A_ ( self ): _lowerCamelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) def A_ ( self ): with self.assertRaisesRegex( lowercase , 'bert-base is not a local folder and is not a valid model identifier' ): _lowerCamelCase : int = AutoFeatureExtractor.from_pretrained('bert-base' ) def A_ ( self ): with self.assertRaisesRegex( lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): _lowerCamelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase , revision='aaaaaa' ) def A_ ( self ): with self.assertRaisesRegex( lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ): _lowerCamelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' ) def A_ ( self ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowercase ): _lowerCamelCase : Tuple = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowercase ): _lowerCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase ) _lowerCamelCase : int = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) # Test feature extractor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowercase ) _lowerCamelCase : Any = AutoFeatureExtractor.from_pretrained(lowercase , trust_remote_code=lowercase ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) def A_ ( self ): try: AutoConfig.register('custom' , lowercase ) AutoFeatureExtractor.register(lowercase , lowercase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowercase ): AutoFeatureExtractor.register(lowercase , lowercase ) # Now that the config is registered, it can be used as any other config with the auto-API _lowerCamelCase : List[Any] = CustomFeatureExtractor.from_pretrained(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowercase ) _lowerCamelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(lowercase ) self.assertIsInstance(lowercase , lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def A_ ( self ): class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = True try: AutoConfig.register('custom' , lowercase ) AutoFeatureExtractor.register(lowercase , lowercase ) # If remote code is not set, the default is to use local _lowerCamelCase : Dict = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. 
_lowerCamelCase : Optional[int] = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub _lowerCamelCase : Any = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(not hasattr(lowercase , 'is_local' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
12
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( """--original_config_file""", default=None, type=str, help="""The YAML config file corresponding to the original architecture.""", ) parser.add_argument( """--num_in_channels""", default=None, type=int, help="""The number of input channels. If `None` number of input channels will be automatically inferred.""", ) parser.add_argument( """--scheduler_type""", default="""pndm""", type=str, help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""", ) parser.add_argument( """--pipeline_type""", default=None, type=str, help=( """The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'""" """. If `None` pipeline will be automatically inferred.""" ), ) parser.add_argument( """--image_size""", default=None, type=int, help=( """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2""" """ Base. Use 768 for Stable Diffusion v2.""" ), ) parser.add_argument( """--prediction_type""", default=None, type=str, help=( """The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable""" """ Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2.""" ), ) parser.add_argument( """--extract_ema""", action="""store_true""", help=( """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights""" """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. 
EMA weights usually yield""" """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.""" ), ) parser.add_argument( """--upcast_attention""", action="""store_true""", help=( """Whether the attention computation should always be upcasted. This is necessary when running stable""" """ diffusion 2.1.""" ), ) parser.add_argument( """--from_safetensors""", action="""store_true""", help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""", ) parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") parser.add_argument( """--stable_unclip""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""", ) parser.add_argument( """--stable_unclip_prior""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""", ) parser.add_argument( """--clip_stats_path""", type=str, help="""Path to the clip stats file. 
Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""", required=False, ) parser.add_argument( """--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint.""" ) parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--vae_path""", type=str, default=None, required=False, help="""Set to a path, hub id to an already converted vae to not convert it again.""", ) lowercase__ = parser.parse_args() lowercase__ = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
12
1
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ): """simple docstring""" lowercase__ = RobertaTokenizer lowercase__ = RobertaTokenizerFast lowercase__ = True lowercase__ = {"cls_token": "<s>"} def __lowerCAmelCase ( self : Any ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase__ : Any = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCAmelCase__ : str = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) ) lowerCAmelCase__ : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCAmelCase__ : Any = {'''unk_token''': '''<unk>'''} lowerCAmelCase__ : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCamelCase_ ) + '''\n''' ) with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCamelCase_ ) ) def __lowerCAmelCase ( self : Dict ,**lowercase_ : Optional[int] ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCamelCase_ ) def __lowerCAmelCase ( self : Tuple ,**lowercase_ : Tuple ): kwargs.update(self.special_tokens_map ) return 
RobertaTokenizerFast.from_pretrained(self.tmpdirname ,**lowerCamelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Dict ): lowerCAmelCase__ : List[str] = '''lower newer''' lowerCAmelCase__ : Optional[int] = '''lower newer''' return input_text, output_text def __lowerCAmelCase ( self : List[Any] ): lowerCAmelCase__ : List[Any] = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) lowerCAmelCase__ : str = '''lower newer''' lowerCAmelCase__ : Any = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] lowerCAmelCase__ : int = tokenizer.tokenize(lowerCamelCase_ ) # , add_prefix_space=True) self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ ) lowerCAmelCase__ : Union[str, Any] = tokens + [tokenizer.unk_token] lowerCAmelCase__ : Optional[int] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ ) def __lowerCAmelCase ( self : List[Any] ): lowerCAmelCase__ : Union[str, Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' ,add_special_tokens=lowerCamelCase_ ) ,[0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' ,add_special_tokens=lowerCamelCase_ ) ,[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] ,) @slow def __lowerCAmelCase ( self : Optional[Any] ): lowerCAmelCase__ : Any = self.tokenizer_class.from_pretrained('''roberta-base''' ) lowerCAmelCase__ : Optional[int] = tokenizer.encode('''sequence builders''' ,add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ : str = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ : Dict = tokenizer.encode( '''sequence builders''' ,add_special_tokens=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ) lowerCAmelCase__ : List[str] = tokenizer.encode( '''sequence builders''' ,'''multi-sequence build''' ,add_special_tokens=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ) lowerCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ) lowerCAmelCase__ : int = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ,lowerCamelCase_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __lowerCAmelCase ( self : int ): lowerCAmelCase__ : Dict = self.get_tokenizer() lowerCAmelCase__ : int = '''Encode this sequence.''' lowerCAmelCase__ : Dict = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments lowerCAmelCase__ : int = tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ) lowerCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowerCamelCase_ ,lowerCamelCase_ ) lowerCAmelCase__ : str = tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ) lowerCAmelCase__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) lowerCAmelCase__ : Tuple = 
tokenizer.encode(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ) lowerCAmelCase__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowerCamelCase_ ,lowerCamelCase_ ) # Testing spaces after special tokens lowerCAmelCase__ : Union[str, Any] = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ )} ) # mask token has a left space lowerCAmelCase__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) lowerCAmelCase__ : List[str] = '''Encode <mask> sequence''' lowerCAmelCase__ : Optional[int] = '''Encode <mask>sequence''' lowerCAmelCase__ : int = tokenizer.encode(lowerCamelCase_ ) lowerCAmelCase__ : List[Any] = encoded.index(lowerCamelCase_ ) lowerCAmelCase__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) lowerCAmelCase__ : Optional[Any] = tokenizer.encode(lowerCamelCase_ ) lowerCAmelCase__ : Tuple = encoded.index(lowerCamelCase_ ) lowerCAmelCase__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowerCamelCase_ ,lowerCamelCase_ ) def __lowerCAmelCase ( self : str ): pass def __lowerCAmelCase ( self : Tuple ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ ) lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase_ ,**lowerCamelCase_ ) lowerCAmelCase__ : str = '''A, <mask> AllenNLP sentence.''' lowerCAmelCase__ : Optional[Any] = tokenizer_r.encode_plus(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ) lowerCAmelCase__ : Optional[int] = tokenizer_p.encode_plus(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ) # 
token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) ,sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) ,sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) ,) lowerCAmelCase__ : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) lowerCAmelCase__ : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( lowerCamelCase_ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowerCamelCase_ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def __lowerCAmelCase ( self : Dict ): for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ): lowerCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname ,use_fast=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) lowerCAmelCase__ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] ,lowerCamelCase_ ) self.assertEqual(post_processor_state['''add_prefix_space'''] ,lowerCamelCase_ ) self.assertEqual(post_processor_state['''trim_offsets'''] ,lowerCamelCase_ ) def __lowerCAmelCase ( self : List[str] ): for 
tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): lowerCAmelCase__ : str = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` lowerCAmelCase__ : List[Any] = F'{text_of_1_token} {text_of_1_token}' lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ ,use_fast=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ : List[Any] = tokenizer_r(lowerCamelCase_ ,return_offsets_mapping=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(lowerCamelCase_ ) + 1, len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) ,) lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ ,use_fast=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ : Dict = tokenizer_r(lowerCamelCase_ ,return_offsets_mapping=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(lowerCamelCase_ ) + 1, len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) ,) lowerCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ ,use_fast=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ : Dict = tokenizer_r(lowerCamelCase_ ,return_offsets_mapping=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(lowerCamelCase_ ), len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) ,) lowerCAmelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ ,use_fast=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ 
,trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ : int = tokenizer_r(lowerCamelCase_ ,return_offsets_mapping=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(lowerCamelCase_ ), len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) ,) lowerCAmelCase__ : List[str] = F' {text}' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowerCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ ,use_fast=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ : Optional[int] = tokenizer_r(lowerCamelCase_ ,return_offsets_mapping=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(lowerCamelCase_ ) + 1, 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) ,) lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ ,use_fast=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ : Tuple = tokenizer_r(lowerCamelCase_ ,return_offsets_mapping=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(lowerCamelCase_ ), 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) ,) lowerCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( 
lowerCamelCase_ ,use_fast=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,trim_offsets=lowerCamelCase_ ) lowerCAmelCase__ : List[str] = tokenizer_r(lowerCamelCase_ ,return_offsets_mapping=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(lowerCamelCase_ ), 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) ,)
106
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class _lowercase ( unittest.TestCase ): """simple docstring""" def UpperCamelCase_ (self ): """simple docstring""" a = tempfile.mkdtemp() a = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "的", "价", "格", "是", "15", "便", "alex", "##andra", ",", "。", "-", "t", "shirt", ] a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) a = { "do_resize": True, "size": {"height": 224, "width": 224}, "do_center_crop": True, "crop_size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073], "image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711], "do_convert_rgb": True, } a = os.path.join(self.tmpdirname , lowerCamelCase_ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(lowerCamelCase_ , lowerCamelCase_ ) def UpperCamelCase_ (self , **lowerCamelCase_ ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def UpperCamelCase_ (self , **lowerCamelCase_ ): """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def UpperCamelCase_ (self , **lowerCamelCase_ ): """simple docstring""" return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def UpperCamelCase_ (self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCamelCase_ (self ): 
"""simple docstring""" a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] a = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase_ (self ): """simple docstring""" a = self.get_tokenizer() a = self.get_rust_tokenizer() a = self.get_image_processor() a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase_ ) a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) a = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase_ ) self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , lowerCamelCase_ ) self.assertIsInstance(processor_fast.image_processor , lowerCamelCase_ ) def UpperCamelCase_ (self ): """simple docstring""" a = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" ) a = self.get_image_processor(do_normalize=lowerCamelCase_ ) a = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=lowerCamelCase_ ) self.assertEqual(processor.tokenizer.get_vocab() , 
tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCamelCase_ ) def UpperCamelCase_ (self ): """simple docstring""" a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) a = self.prepare_image_inputs() a = image_processor(lowerCamelCase_ , return_tensors="np" ) a = processor(images=lowerCamelCase_ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCamelCase_ (self ): """simple docstring""" a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) a = "Alexandra,T-shirt的价格是15便士。" a = processor(text=lowerCamelCase_ ) a = tokenizer(lowerCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase_ (self ): """simple docstring""" a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) a = "Alexandra,T-shirt的价格是15便士。" a = self.prepare_image_inputs() a = processor(text=lowerCamelCase_ , images=lowerCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase_ ): processor() def UpperCamelCase_ (self ): """simple docstring""" a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a = processor.batch_decode(lowerCamelCase_ ) a = 
tokenizer.batch_decode(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) def UpperCamelCase_ (self ): """simple docstring""" a = self.get_image_processor() a = self.get_tokenizer() a = ChineseCLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ ) a = "Alexandra,T-shirt的价格是15便士。" a = self.prepare_image_inputs() a = processor(text=lowerCamelCase_ , images=lowerCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
227
0
def __lowerCamelCase(arr, required_sum):
    """Return True if some subset of ``arr`` sums exactly to ``required_sum``.

    Classic 0/1 subset-sum dynamic programme: ``subset[i][j]`` is True when
    some subset of the first ``i`` elements can sum to ``j``.

    :param arr: sequence of non-negative integers to pick from
    :param required_sum: non-negative target sum
    :return: True if the target sum is reachable, else False

    Runs in O(len(arr) * required_sum) time and space.
    """
    arr_len = len(arr)
    # subset[i][j]: can a subset of the first i elements reach sum j?
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # A sum of zero is always reachable by taking no elements.
    for i in range(arr_len + 1):
        subset[i][0] = True
    # With zero elements available, no positive sum is reachable.
    for j in range(1, required_sum + 1):
        subset[0][j] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                # Element is too large for this target: we can only skip it.
                subset[i][j] = subset[i - 1][j]
            else:
                # Either skip the element, or take it and solve the remainder.
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
362
def solution(n=1_000):
    """Project Euler 57: square-root convergents.

    Count how many of the first ``n`` expansions of the continued fraction
    for sqrt(2) have a numerator with more digits than the denominator.

    Each expansion derives from the previous one:
        numerator   = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
    starting from 1/1.

    :param n: number of expansions to examine (default 1000)
    :return: count of expansions whose numerator is longer
    """
    prev_numerator, prev_denominator = 1, 1
    hits = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            hits.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(hits)


# Backward-compatible alias for the previous (auto-mangled) public name.
__lowerCamelCase = solution

if __name__ == "__main__":
    print(f"{solution() = }")
121
0
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib A: Dict = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } A: str = logging.WARNING def _snake_case ( ): UpperCAmelCase : Union[str, Any] = os.getenv("""DATASETS_VERBOSITY""" , UpperCamelCase ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F"Unknown option DATASETS_VERBOSITY={env_level_str}, " F"has to be one of: { ', '.join(log_levels.keys() ) }" ) return _default_log_level def _snake_case ( ): return __name__.split(""".""" )[0] def _snake_case ( ): return logging.getLogger(_get_library_name() ) def _snake_case ( ): # Apply our default configuration to the library root logger. UpperCAmelCase : List[Any] = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def _snake_case ( ): UpperCAmelCase : Optional[Any] = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def _snake_case ( UpperCamelCase : Optional[str] = None ): if name is None: UpperCAmelCase : int = _get_library_name() return logging.getLogger(UpperCamelCase ) def _snake_case ( ): return _get_library_root_logger().getEffectiveLevel() def _snake_case ( UpperCamelCase : int ): _get_library_root_logger().setLevel(UpperCamelCase ) def _snake_case ( ): return set_verbosity(UpperCamelCase ) def _snake_case ( ): return set_verbosity(UpperCamelCase ) def _snake_case ( ): return set_verbosity(UpperCamelCase ) def _snake_case ( ): return set_verbosity(UpperCamelCase ) def _snake_case ( ): UpperCAmelCase : Optional[Any] = False def _snake_case ( ): UpperCAmelCase : List[Any] = True # Configure the library root logger at the module level (singleton-like) 
_configure_library_root_logger() class SCREAMING_SNAKE_CASE__ : def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int: # pylint: disable=unused-argument '''simple docstring''' UpperCAmelCase : str = args[0] if args else None def __iter__( self ) -> List[Any]: '''simple docstring''' return iter(self._iterator ) def __getattr__( self , _SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' def empty_fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ) -> int: '''simple docstring''' return self def __exit__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' return A: List[Any] = True class SCREAMING_SNAKE_CASE__ : def __call__( self , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' if _tqdm_active and not disable: return tqdm_lib.tqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) else: return EmptyTqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' UpperCAmelCase : str = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm.get_lock() A: List[Any] = _tqdm_cls() def _snake_case ( ): global _tqdm_active return bool(_tqdm_active ) def _snake_case ( ): global _tqdm_active UpperCAmelCase : Optional[Any] = True def _snake_case ( ): global _tqdm_active UpperCAmelCase : Tuple = False
109
"""simple docstring""" A: Union[str, Any] = { 0: "0", 1: "1", 2: "2", 3: "3", 4: "4", 5: "5", 6: "6", 7: "7", 8: "8", 9: "9", 1_0: "a", 1_1: "b", 1_2: "c", 1_3: "d", 1_4: "e", 1_5: "f", } def _snake_case ( UpperCamelCase : float ): assert type(UpperCamelCase ) in (int, float) and decimal == int(UpperCamelCase ) UpperCAmelCase : str = int(UpperCamelCase ) UpperCAmelCase : Optional[int] = """""" UpperCAmelCase : List[str] = False if decimal < 0: UpperCAmelCase : Any = True decimal *= -1 while decimal > 0: UpperCAmelCase , UpperCAmelCase : Dict = divmod(UpperCamelCase , 16 ) UpperCAmelCase : Union[str, Any] = values[remainder] + hexadecimal UpperCAmelCase : int = """0x""" + hexadecimal if negative: UpperCAmelCase : Optional[int] = """-""" + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
109
1
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class lowerCamelCase : def __init__(self : str , _A : List[Any] , _A : Optional[Any]=None , _A : int=None , _A : List[Any]=None , _A : str="resnet50" , _A : str=3 , _A : str=3_2 , _A : Tuple=3 , _A : Dict=True , _A : List[str]=True , ) -> List[Any]: snake_case = parent snake_case = out_indices if out_indices is not None else [4] snake_case = stage_names snake_case = out_features snake_case = backbone snake_case = batch_size snake_case = image_size snake_case = num_channels snake_case = use_pretrained_backbone snake_case = is_training def UpperCAmelCase(self : Optional[int] ) -> List[Any]: snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case = self.get_config() return config, pixel_values def UpperCAmelCase(self : Optional[Any] ) -> Optional[Any]: return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def UpperCAmelCase(self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] ) -> Optional[int]: snake_case = TimmBackbone(config=_A ) model.to(_A ) model.eval() with torch.no_grad(): snake_case = model(_A ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 
1_4, 1_4) , ) def UpperCAmelCase(self : List[str] ) -> List[Any]: snake_case = self.prepare_config_and_inputs() snake_case , snake_case = config_and_inputs snake_case = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class lowerCamelCase ( A_ , A_ , A_ , unittest.TestCase ): UpperCAmelCase__ : str = (TimmBackbone,) if is_torch_available() else () UpperCAmelCase__ : str = {"feature-extraction": TimmBackbone} if is_torch_available() else {} UpperCAmelCase__ : Tuple = False UpperCAmelCase__ : str = False UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[Any] = False def UpperCAmelCase(self : Optional[Any] ) -> Union[str, Any]: snake_case = TimmBackboneModelTester(self ) snake_case = ConfigTester(self , config_class=_A , has_text_modality=_A ) def UpperCAmelCase(self : List[Any] ) -> int: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase(self : Optional[Any] ) -> Optional[int]: snake_case = "resnet18" snake_case = "microsoft/resnet-18" snake_case = AutoBackbone.from_pretrained(_A , use_timm_backbone=_A ) snake_case = AutoBackbone.from_pretrained(_A ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). 
self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) snake_case = AutoBackbone.from_pretrained(_A , use_timm_backbone=_A , out_indices=[1, 2, 3] ) snake_case = AutoBackbone.from_pretrained(_A , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip("TimmBackbone doesn't support feed forward chunking" ) def UpperCAmelCase(self : Any ) -> Tuple: pass @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" ) def UpperCAmelCase(self : int ) -> Optional[int]: pass @unittest.skip("TimmBackbone initialization is managed on the timm side" ) def UpperCAmelCase(self : int ) -> int: pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def UpperCAmelCase(self : int ) -> Union[str, Any]: pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def UpperCAmelCase(self : Optional[Any] ) -> Tuple: pass @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" ) def UpperCAmelCase(self : int ) -> Optional[int]: pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def UpperCAmelCase(self : List[str] ) -> Dict: pass @unittest.skip("model weights aren't tied in TimmBackbone." ) def UpperCAmelCase(self : Optional[int] ) -> Optional[int]: pass @unittest.skip("model weights aren't tied in TimmBackbone." 
) def UpperCAmelCase(self : Union[str, Any] ) -> Optional[int]: pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def UpperCAmelCase(self : Optional[int] ) -> Tuple: pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def UpperCAmelCase(self : Any ) -> Optional[int]: pass @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." ) def UpperCAmelCase(self : Optional[Any] ) -> List[str]: pass @unittest.skip("TimmBackbone doesn't support output_attentions." ) def UpperCAmelCase(self : Optional[int] ) -> Dict: pass @unittest.skip("Safetensors is not supported by timm." ) def UpperCAmelCase(self : Tuple ) -> Dict: pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def UpperCAmelCase(self : int ) -> str: pass def UpperCAmelCase(self : Tuple ) -> Optional[Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(_A ) snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case = [*signature.parameters.keys()] snake_case = ["pixel_values"] self.assertListEqual(arg_names[:1] , _A ) def UpperCAmelCase(self : str ) -> int: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = True snake_case = self.has_attentions # no need to test all models as different heads yield the same functionality snake_case = self.all_model_classes[0] snake_case = model_class(_A ) model.to(_A ) snake_case = self._prepare_for_class(_A , _A ) snake_case = model(**_A ) snake_case = outputs[0][-1] # Encoder-/Decoder-only models snake_case = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: snake_case = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=_A ) self.assertIsNotNone(hidden_states.grad 
) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def UpperCAmelCase(self : List[str] ) -> Any: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(_A ) model.to(_A ) model.eval() snake_case = model(**_A ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None snake_case = copy.deepcopy(_A ) snake_case = None snake_case = model_class(_A ) model.to(_A ) model.eval() snake_case = model(**_A ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights snake_case = copy.deepcopy(_A ) snake_case = False snake_case = model_class(_A ) model.to(_A ) model.eval() snake_case = model(**_A )
137
class lowerCamelCase : def __init__(self : List[Any] , _A : str ) -> Any: # we need a list not a string, so do something to change the type snake_case = arr.split("," ) def UpperCAmelCase(self : str ) -> str: snake_case = [int(self.array[0] )] * len(self.array ) snake_case = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): snake_case = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) snake_case = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": _A = input("please input some numbers:") _A = SubArray(whole_array) _A = array.solve_sub_array() print(("the results is:", re))
137
1
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def __lowerCamelCase ( snake_case__ = 3 ) -> qiskit.result.counts.Counts: """simple docstring""" if isinstance(snake_case__ ,snake_case__ ): raise TypeError("""number of qubits must be a integer.""" ) if number_of_qubits <= 0: raise ValueError("""number of qubits must be > 0.""" ) if math.floor(snake_case__ ) != number_of_qubits: raise ValueError("""number of qubits must be exact integer.""" ) if number_of_qubits > 10: raise ValueError("""number of qubits too large to simulate(>10).""" ) _SCREAMING_SNAKE_CASE = QuantumRegister(snake_case__ ,"""qr""" ) _SCREAMING_SNAKE_CASE = ClassicalRegister(snake_case__ ,"""cr""" ) _SCREAMING_SNAKE_CASE = QuantumCircuit(snake_case__ ,snake_case__ ) _SCREAMING_SNAKE_CASE = number_of_qubits for i in range(snake_case__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(snake_case__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j) ,snake_case__ ,snake_case__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(snake_case__ ,number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(snake_case__ ,snake_case__ ) # simulate with 10000 shots _SCREAMING_SNAKE_CASE = Aer.get_backend("""qasm_simulator""" ) _SCREAMING_SNAKE_CASE = execute(snake_case__ ,snake_case__ ,shots=1_00_00 ) return job.result().get_counts(snake_case__ ) if __name__ == "__main__": print( f"Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}" )
306
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) -> int: """simple docstring""" if isinstance(snake_case__ ,snake_case__ ): _SCREAMING_SNAKE_CASE = np.full((len(snake_case__ ), sequence_length, 2) ,snake_case__ ) else: _SCREAMING_SNAKE_CASE = np.full((len(snake_case__ ), sequence_length) ,snake_case__ ) for i, tensor in enumerate(snake_case__ ): if padding_side == "right": if isinstance(snake_case__ ,snake_case__ ): _SCREAMING_SNAKE_CASE = tensor[:sequence_length] else: _SCREAMING_SNAKE_CASE = tensor[:sequence_length] else: if isinstance(snake_case__ ,snake_case__ ): _SCREAMING_SNAKE_CASE = tensor[:sequence_length] else: _SCREAMING_SNAKE_CASE = tensor[:sequence_length] return out_tensor.tolist() def __lowerCamelCase ( snake_case__ ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE = ord(snake_case__ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True _SCREAMING_SNAKE_CASE = unicodedata.category(snake_case__ ) if cat.startswith("""P""" ): return True return False @dataclass class __UpperCAmelCase (_UpperCAmelCase ): __snake_case : PreTrainedTokenizerBase __snake_case : Union[bool, str, PaddingStrategy] = True __snake_case : Optional[int] = None __snake_case : Optional[int] = None __snake_case : int = -100 __snake_case : str = "pt" def UpperCamelCase ( self: str , UpperCAmelCase_: Optional[Any] ): '''simple docstring''' import torch _SCREAMING_SNAKE_CASE = """label""" if """label""" in features[0].keys() else """labels""" _SCREAMING_SNAKE_CASE = [feature[label_name] for feature in features] if label_name in features[0].keys() else None _SCREAMING_SNAKE_CASE = 
self.tokenizer.pad( UpperCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , ) if labels is None: return batch _SCREAMING_SNAKE_CASE = torch.tensor(batch["""entity_ids"""] ).shape[1] _SCREAMING_SNAKE_CASE = self.tokenizer.padding_side if padding_side == "right": _SCREAMING_SNAKE_CASE = [ list(UpperCAmelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase_ )) for label in labels ] else: _SCREAMING_SNAKE_CASE = [ [self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase_ )) + list(UpperCAmelCase_ ) for label in labels ] _SCREAMING_SNAKE_CASE = [feature["""ner_tags"""] for feature in features] _SCREAMING_SNAKE_CASE = padding_tensor(UpperCAmelCase_ , -1 , UpperCAmelCase_ , UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = [feature["""original_entity_spans"""] for feature in features] _SCREAMING_SNAKE_CASE = padding_tensor(UpperCAmelCase_ , (-1, -1) , UpperCAmelCase_ , UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = {k: torch.tensor(UpperCAmelCase_ , dtype=torch.intaa ) for k, v in batch.items()} return batch
306
1
import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def __UpperCamelCase ( _A ): return 1 / (1 + np.exp(-z )) def __UpperCamelCase ( _A , _A ): return (-y * np.log(_UpperCAmelCase ) - (1 - y) * np.log(1 - h )).mean() def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = np.dot(_UpperCAmelCase , _UpperCAmelCase ) return np.sum(y * scores - np.log(1 + np.exp(_UpperCAmelCase ) ) ) def __UpperCamelCase ( _A , _A , _A , _A=70000 ): lowerCAmelCase_ = np.zeros(x.shape[1] ) for iterations in range(_UpperCAmelCase ): lowerCAmelCase_ = np.dot(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase_ = sigmoid_function(_UpperCAmelCase ) lowerCAmelCase_ = np.dot(x.T , h - y ) / y.size lowerCAmelCase_ = theta - alpha * gradient # updating the weights lowerCAmelCase_ = np.dot(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase_ = sigmoid_function(_UpperCAmelCase ) lowerCAmelCase_ = cost_function(_UpperCAmelCase , _UpperCAmelCase ) if iterations % 100 == 0: print(f"loss: {j} \t" ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": _A = datasets.load_iris() _A = iris.data[:, :2] _A = (iris.target != 0) * 1 _A = 0.1 _A = logistic_reg(alpha, x, y, max_iterations=70_000) print('''theta: ''', theta) # printing the theta i.e our weights vector def __UpperCamelCase ( _A ): return sigmoid_function( np.dot(_UpperCAmelCase , _UpperCAmelCase ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''') (_A) = (x[:, 0].min(), x[:, 0].max()) (_A) = (x[:, 1].min(), x[:, 1].max()) (_A) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) _A = np.c_[xxa.ravel(), xxa.ravel()] _A = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''') plt.legend() plt.show()
365
def __UpperCamelCase ( _A ): lowerCAmelCase_ = [int(_A ) for i in ip_va_address.split('''.''' ) if i.isdigit()] return len(_A ) == 4 and all(0 <= int(_A ) <= 254 for octet in octets ) if __name__ == "__main__": _A = input().strip() _A = '''valid''' if is_ip_va_address_valid(ip) else '''invalid''' print(f"{ip} is a {valid_or_invalid} IP v4 address.")
167
0
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() lowercase__ = logging.get_logger(__name__) def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : List[Any] = RobertaPreLayerNormConfig.from_pretrained( lowercase__ , architectures=['RobertaPreLayerNormForMaskedLM'] ) # convert state_dict _lowerCamelCase : str = torch.load(hf_hub_download(repo_id=lowercase__ , filename='pytorch_model.bin' ) ) _lowerCamelCase : Dict = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.' ): _lowerCamelCase : Any = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ): continue _lowerCamelCase : Dict = tensor_value _lowerCamelCase : List[Any] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=lowercase__ , config=lowercase__ , state_dict=lowercase__ ) model.save_pretrained(lowercase__ ) # convert tokenizer _lowerCamelCase : Dict = AutoTokenizer.from_pretrained(lowercase__ ) tokenizer.save_pretrained(lowercase__ ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint-repo""", default=None, type=str, required=True, help="""Path the official PyTorch dump, e.g. 
'andreasmadsen/efficient_mlm_m0.40'.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowercase__ = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
96
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Dict = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Tuple = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :int = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase :Any = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
331
0
'''simple docstring''' import math class UpperCamelCase : """simple docstring""" def __init__( self : str , UpperCAmelCase_ : Union[str, Any]=0): # a graph with Node 0,1,...,N-1 """simple docstring""" a : Any = n a : Optional[Any] = [ [math.inf for j in range(0 , UpperCAmelCase_)] for i in range(0 , UpperCAmelCase_) ] # adjacency matrix for weight a : Any = [ [math.inf for j in range(0 , UpperCAmelCase_)] for i in range(0 , UpperCAmelCase_) ] # dp[i][j] stores minimum distance from i to j def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int): """simple docstring""" a : Optional[int] = w def SCREAMING_SNAKE_CASE_ ( self : Dict): """simple docstring""" for k in range(0 , self.n): for i in range(0 , self.n): for j in range(0 , self.n): a : Dict = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j]) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any): """simple docstring""" return self.dp[u][v] if __name__ == "__main__": UpperCamelCase : Union[str, Any] = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
345
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase ( a_ ): """simple docstring""" A : List[str] = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." 
) A : List[Any] = "CIDAS/clipseg-rd64-refined" A : Optional[Any] = "image_segmenter" A : List[Any] = CLIPSegForImageSegmentation A : Tuple = ["image", "text"] A : Optional[int] = ["image"] def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str): """simple docstring""" requires_backends(self , ['vision']) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str): """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors='pt') def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str): """simple docstring""" with torch.no_grad(): a : Union[str, Any] = self.model(**UpperCAmelCase_).logits return logits def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int): """simple docstring""" a : int = outputs.cpu().detach().numpy() a : str = 0 a : str = 1 return Image.fromarray((array * 2_5_5).astype(np.uinta))
345
1
'''simple docstring''' import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class UpperCAmelCase ( snake_case_ , unittest.TestCase ): _lowercase: Union[str, Any] = BarthezTokenizer _lowercase: List[str] = BarthezTokenizerFast _lowercase: Tuple = True _lowercase: str = True def lowercase__ ( self : Optional[int] ) -> Any: super().setUp() _lowerCAmelCase = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=__snake_case ) _lowerCAmelCase = tokenizer def lowercase__ ( self : Optional[int] ) -> int: _lowerCAmelCase = """<pad>""" _lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case ) def lowercase__ ( self : List[str] ) -> Optional[int]: _lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(__snake_case ) , 10_11_22 ) def lowercase__ ( self : Any ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 ) @require_torch def lowercase__ ( self : int ) -> Union[str, Any]: _lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _lowerCAmelCase = [0, 57, 30_18, 7_03_07, 91, 2] _lowerCAmelCase = self.tokenizer( __snake_case , max_length=len(__snake_case ) , padding=__snake_case , truncation=__snake_case , return_tensors="""pt""" ) self.assertIsInstance(__snake_case , 
__snake_case ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) _lowerCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(__snake_case , __snake_case ) def lowercase__ ( self : List[Any] ) -> Any: if not self.test_rust_tokenizer: return _lowerCAmelCase = self.get_tokenizer() _lowerCAmelCase = self.get_rust_tokenizer() _lowerCAmelCase = """I was born in 92000, and this is falsé.""" _lowerCAmelCase = tokenizer.tokenize(__snake_case ) _lowerCAmelCase = rust_tokenizer.tokenize(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) _lowerCAmelCase = tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) _lowerCAmelCase = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) self.assertListEqual(__snake_case , __snake_case ) _lowerCAmelCase = self.get_rust_tokenizer() _lowerCAmelCase = tokenizer.encode(__snake_case ) _lowerCAmelCase = rust_tokenizer.encode(__snake_case ) self.assertListEqual(__snake_case , __snake_case ) @slow def lowercase__ ( self : str ) -> List[str]: # fmt: off _lowerCAmelCase = {"""input_ids""": [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _lowerCAmelCase = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=__snake_case , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__snake_case , )
70
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self , a , a=2 , a=3 , a=4 , a=2 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=36 , a=2 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=6 , a=6 , a=3 , a=4 , a=None , a=1000 , ): lowercase__ : List[str] = parent lowercase__ : List[str] = batch_size lowercase__ : int = num_channels lowercase__ : List[Any] = image_size lowercase__ : List[str] = patch_size lowercase__ : List[Any] = is_training lowercase__ : Tuple = use_input_mask lowercase__ : str = use_token_type_ids lowercase__ : Optional[int] = use_labels lowercase__ : Any = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : List[Any] = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : int = hidden_act lowercase__ : Optional[Any] = hidden_dropout_prob lowercase__ : int = attention_probs_dropout_prob lowercase__ : str 
= max_position_embeddings lowercase__ : List[Any] = type_vocab_size lowercase__ : str = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : Union[str, Any] = coordinate_size lowercase__ : Union[str, Any] = shape_size lowercase__ : Any = num_labels lowercase__ : List[str] = num_choices lowercase__ : Optional[Any] = scope lowercase__ : Optional[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) lowercase__ : Optional[int] = text_seq_length lowercase__ : Optional[int] = (image_size // patch_size) ** 2 + 1 lowercase__ : str = self.text_seq_length + self.image_seq_length def snake_case_ ( self): lowercase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size) lowercase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox) lowercase__ : str = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: lowercase__ : Optional[Any] = bbox[i, j, 3] lowercase__ : List[Any] = bbox[i, j, 1] lowercase__ : List[str] = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: lowercase__ : int = bbox[i, j, 2] lowercase__ : List[Any] = bbox[i, j, 0] lowercase__ : Optional[Any] = tmp_coordinate lowercase__ : Dict = tf.constant(a) lowercase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) lowercase__ : Optional[Any] = None if self.use_input_mask: lowercase__ : str = random_attention_mask([self.batch_size, self.text_seq_length]) lowercase__ : Tuple = None if self.use_token_type_ids: lowercase__ : str = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size) lowercase__ : List[Any] = None lowercase__ : Optional[int] = None if self.use_labels: lowercase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase__ : Union[str, Any] = 
ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels) lowercase__ : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def snake_case_ ( self , a , a , a , a , a , a): lowercase__ : str = TFLayoutLMvaModel(config=a) # text + image lowercase__ : List[str] = model(a , pixel_values=a , training=a) lowercase__ : Any = model( a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , ) lowercase__ : List[Any] = model(a , bbox=a , pixel_values=a , training=a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) # text only lowercase__ : List[Any] = model(a , training=a) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size)) # image only lowercase__ : Dict = model({'pixel_values': pixel_values} , training=a) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size)) def snake_case_ ( self , a , a , a , a , a , a , a): lowercase__ : Optional[Any] = self.num_labels lowercase__ : Optional[Any] = TFLayoutLMvaForSequenceClassification(config=a) lowercase__ : List[str] = model( a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def snake_case_ ( self , a , a , a , a , a , a , a): lowercase__ : Tuple = self.num_labels lowercase__ : Dict = TFLayoutLMvaForTokenClassification(config=a) lowercase__ : Any = model( a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels)) def snake_case_ ( self , a , a , a , a , a , a , a): lowercase__ : Optional[int] = 2 lowercase__ : List[str] = TFLayoutLMvaForQuestionAnswering(config=a) lowercase__ : Tuple = model( a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def snake_case_ ( self): lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : Optional[Any] = config_and_inputs lowercase__ : Optional[Any] = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE__ (__snake_case , __snake_case , unittest.TestCase ): __lowerCamelCase : List[str] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) __lowerCamelCase : Dict = ( {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel} if is_tf_available() else {} ) __lowerCamelCase : Optional[Any] = False __lowerCamelCase : int = False __lowerCamelCase : int = False def snake_case_ ( self , a , a , a , a , a): 
return True def snake_case_ ( self , a , a , a=False): lowercase__ : Tuple = copy.deepcopy(a) if model_class in get_values(a): lowercase__ : Optional[Any] = { k: tf.tile(tf.expand_dims(a , 1) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(a , tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(a): lowercase__ : Union[str, Any] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa) elif model_class in get_values(a): lowercase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa) lowercase__ : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa) elif model_class in get_values(a): lowercase__ : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa) elif model_class in get_values(a): lowercase__ : Optional[int] = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa) return inputs_dict def snake_case_ ( self): lowercase__ : Tuple = TFLayoutLMvaModelTester(self) lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , hidden_size=37) def snake_case_ ( self): self.config_tester.run_common_tests() def snake_case_ ( self): lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(a) if getattr(a , 'hf_compute_loss' , a): # The number of elements in the loss should be the same as the number of elements in the label lowercase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a) lowercase__ : Union[str, Any] = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a)[0] ] lowercase__ : Tuple = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs lowercase__ : Dict = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a) lowercase__ : int = 
prepared_for_class.pop('input_ids') lowercase__ : Optional[int] = model(a , **a)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss when we mask some positions lowercase__ : str = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a) lowercase__ : str = prepared_for_class.pop('input_ids') if "labels" in prepared_for_class: lowercase__ : Optional[Any] = prepared_for_class['labels'].numpy() if len(labels.shape) > 1 and labels.shape[1] != 1: lowercase__ : Union[str, Any] = -100 lowercase__ : Optional[Any] = tf.convert_to_tensor(a) lowercase__ : Any = model(a , **a)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) self.assertTrue(not np.any(np.isnan(loss.numpy()))) # Test that model correctly compute the loss with a dict lowercase__ : List[Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a) lowercase__ : Optional[Any] = model(a)[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) # Test that model correctly compute the loss with a tuple lowercase__ : List[str] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a) # Get keys that were added with the _prepare_for_class function lowercase__ : int = prepared_for_class.keys() - inputs_dict.keys() lowercase__ : List[Any] = inspect.signature(model.call).parameters lowercase__ : List[str] = list(signature.keys()) # Create a dictionary holding the location of the tensors in the tuple lowercase__ : Dict = {0: 'input_ids'} for label_key in label_keys: lowercase__ : Tuple = signature_names.index(a) lowercase__ : List[str] = label_key lowercase__ : int = sorted(tuple_index_mapping.items()) # Initialize a list with their default values, update the values and convert to a tuple lowercase__ : List[Any] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default) for index, value 
in sorted_tuple_index_mapping: lowercase__ : Optional[int] = prepared_for_class[value] lowercase__ : Any = tuple(a) # Send to model lowercase__ : List[str] = model(tuple_input[:-1])[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) def snake_case_ ( self): ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(a , a , a , a , a , a) def snake_case_ ( self): ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase__ : Dict = type self.model_tester.create_and_check_model(a , a , a , a , a , a) def snake_case_ ( self): ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( a , a , a , a , a , a , a) def snake_case_ ( self): ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( a , a , a , a , a , a , a) def snake_case_ ( self): ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( a , a , a , a , a , a , a) @slow def snake_case_ ( self): for model_name in 
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = TFLayoutLMvaModel.from_pretrained(a) self.assertIsNotNone(a) def snake_case__ ( ): '''simple docstring''' lowercase__ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf class SCREAMING_SNAKE_CASE__ (unittest.TestCase ): @cached_property def snake_case_ ( self): return LayoutLMvaImageProcessor(apply_ocr=a) if is_vision_available() else None @slow def snake_case_ ( self): lowercase__ : Optional[int] = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base') lowercase__ : Tuple = self.default_image_processor lowercase__ : Union[str, Any] = prepare_img() lowercase__ : Optional[int] = image_processor(images=a , return_tensors='tf').pixel_values lowercase__ : List[Any] = tf.constant([[1, 2]]) lowercase__ : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]) , axis=0) # forward pass lowercase__ : List[str] = model(input_ids=a , bbox=a , pixel_values=a , training=a) # verify the logits lowercase__ : Optional[int] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , a) lowercase__ : Union[str, Any] = tf.constant( [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]]) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1e-4))
214
0
"""Compute ROUGE scores for a file of predictions against a file of references."""
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **calc_kwargs):
    """Score line-aligned prediction/target files with ROUGE.

    Args:
        pred_path: text file with one predicted summary per line.
        tgt_path: text file with one reference summary per line; only the
            first ``len(predictions)`` lines are scored so the files may
            differ in length.
        save_path: optional path; when given, the metrics dict is written
            there as JSON via ``save_json``.
        **calc_kwargs: forwarded verbatim to ``calculate_rouge``.

    Returns:
        Dict mapping ROUGE metric names to scores.
    """
    # Context managers close the handles deterministically; the original
    # left both files open until garbage collection.
    with open(pred_path) as f:
        predictions = [line.strip() for line in f]
    with open(tgt_path) as f:
        targets = [line.strip() for line in f][: len(predictions)]
    metrics = calculate_rouge(predictions, targets, **calc_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    # `fire` turns the function signature into a CLI.
    fire.Fire(calculate_rouge_path)
365
"""Integration tests for the `datasets` offline-simulation helpers."""
import pytest
import requests

# NOTE(review): `OfflineModeIsEnabled` added to restore the third test's
# expected exception — confirm it lives in datasets.utils.file_utils.
from datasets.utils.file_utils import OfflineModeIsEnabled, http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    """CONNECTION_TIMES_OUT: a request with no timeout would hang forever."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        # Without a timeout the simulated hang is surfaced as an explicit error.
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        # With an explicit timeout the hang turns into a ConnectTimeout.
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    """CONNECTION_FAILS: every request fails immediately with ConnectionError."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    """HF_DATASETS_OFFLINE=1: library-level helpers refuse to go online."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(OfflineModeIsEnabled):
            http_head("https://huggingface.co")
104
0
"""Lazy-import init for the Pix2Struct model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps submodule name -> the public symbols it provides; consumed by
# _LazyModule below so heavy dependencies are imported only on first access.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

# The image processor requires the vision extras (PIL); register only if present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers; names/modules must mirror
    # _import_structure exactly (the original referenced non-existent
    # ``*_pixastruct`` modules and ``PixaStruct*`` symbols).
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves attributes on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
"""Sequence-classification pipeline: tokenize text, run the model, map logits to labeled scores."""
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    """Element-wise logistic sigmoid; used for multi-label / single-logit heads."""
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis (max is subtracted first)."""
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    """Post-processing function applied to the raw logits."""

    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    # Legacy default: return only the top prediction per input.
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Refuse model classes that are not sequence-classification heads.
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        """Split user kwargs into (preprocess, forward, postprocess) parameter dicts.

        ``top_k`` defaults to the sentinel ``""`` so we can distinguish
        "not passed" (legacy behavior) from an explicit ``top_k=None``.
        """
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            # Allow passing the enum by its lowercase string name.
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        """Classify text input(s); a bare string in legacy mode returns a one-element list."""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize a string, a {"text", "text_pair"} dict, or the legacy [[a, b]] pair form."""
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair."
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        """Convert logits to a sorted list of {"label", "score"} dicts (or a single dict in legacy mode)."""
        if function_to_apply is None:
            # Infer the activation from the model config when not given explicitly.
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
59
1
"""simple docstring""" a :Optional[Any] = "0.18.2" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, 
KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, 
StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, 
StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
368
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class __a (UpperCamelCase_ , unittest.TestCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :Union[str, Any] = RoFormerTokenizer _SCREAMING_SNAKE_CASE :List[Any] = RoFormerTokenizerFast _SCREAMING_SNAKE_CASE :Tuple = True _SCREAMING_SNAKE_CASE :Dict = True def _a ( self ) -> List[Any]: """simple docstring""" super().setUp() def _a ( self , **_a ) -> Optional[Any]: """simple docstring""" return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_a ) def _a ( self , **_a ) -> List[Any]: """simple docstring""" return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_a ) def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = """永和服装饰品有限公司,今天天气非常好""" SCREAMING_SNAKE_CASE__ : List[Any] = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好""" return input_text, output_text def _a ( self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.get_tokenizer() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_chinese_input_output_texts() SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.tokenize(_a ) self.assertListEqual(_a , output_text.split() ) SCREAMING_SNAKE_CASE__ : int = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : List[str] = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a ) def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_chinese_input_output_texts() SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.tokenize(_a ) 
self.assertListEqual(_a , output_text.split() ) SCREAMING_SNAKE_CASE__ : int = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Optional[Any] = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a ) def _a ( self ) -> List[Any]: """simple docstring""" pass def _a ( self ) -> List[Any]: """simple docstring""" pass def _a ( self ) -> Optional[int]: """simple docstring""" pass
56
0
"""Packaged Parquet loader for the `datasets` library."""
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet.

    NOTE(review): the original bound the logger, both class names and all
    three config fields to colliding mangled identifiers; the field names
    below are restored from the attributes the methods actually read
    (``self.config.batch_size`` / ``.columns`` / ``.features``).
    """

    batch_size: int = 10_000                       # rows per yielded Arrow table
    columns: Optional[List[str]] = None            # optional column projection
    features: Optional[datasets.Features] = None   # explicit schema override


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Download/extract the configured data files and emit one generator per split."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # Un-split input: everything goes into a single TRAIN split.
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ``(key, pa.Table)`` pairs, batching each parquet file."""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # A column projection must match the declared features exactly.
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
73
"""torch.hub entry points for the in-repo `transformers` checkout."""
import os
import sys


# Make the local `src/` checkout importable so the entry points below load
# the in-repo `transformers` rather than any installed copy.
# NOTE(review): the original assigned this path (and the dependency list) to
# the mangled name ``a`` while `sys.path.append(SRC_DIR)` referenced the
# undefined ``SRC_DIR``.
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# `dependencies` is the magic variable torch.hub reads to verify that the
# packages required by this hubconf are installed.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


# Each entry point simply forwards to the matching Auto* factory; the
# decorator copies that factory's docstring onto the hub entry point.
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
73
1
"""Project Euler problem 89: characters saved by writing Roman numerals minimally.

NOTE(review): the original defined all three functions under the single name
``UpperCamelCase_`` while the module body called ``parse_roman_numerals``,
``generate_roman_numerals`` and ``solution`` (a guaranteed NameError); the
intended names are restored from those call sites.
"""
import os

# Value of each Roman symbol.
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman-numeral string to its integer value.

    Uses the subtractive rule: a symbol smaller than its successor is
    subtracted (the "I" in "IV"), otherwise it is added.

    >>> parse_roman_numerals("XIV")
    14
    """
    if not numerals:  # guard: the original indexed past the end on ""
        return 0
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # The last symbol is always added.
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Render ``num`` (expected 1..3999) as a minimal-form Roman numeral.

    >>> generate_roman_numerals(14)
    'XIV'
    """
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Total characters saved by rewriting every numeral in the data file minimally."""
    savings = 0
    # The data file lives next to this script.
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        value = parse_roman_numerals(original)
        shortened = generate_roman_numerals(value)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
92
"""Placeholder ("dummy") flax objects.

Each class stands in for a real flax-backed class when the `flax` backend is
not installed: the ``DummyObject`` metaclass plus ``requires_backends`` turn
instantiation, ``from_config`` and ``from_pretrained`` into a helpful
ImportError.

NOTE(review): the original declared every class with the same mangled name
``_snake_case`` (each shadowing the previous) and an undefined metaclass
``lowercase_``, while importing ``DummyObject`` and never using it.  The 13
class names below are restored from the upstream diffusers
``dummy_flax_objects.py`` (same count, same order) — verify against the
package ``__init__``.
"""
from ..utils import DummyObject, requires_backends


class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
92
1
"""OwlViT model configuration classes."""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Checkpoint name -> config URL, used by from_pretrained resolution.
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    """Configuration for the OwlViT text encoder.

    NOTE(review): the original declared every ``__init__`` parameter in this
    file with the same mangled name ``snake_case`` (duplicate argument names
    are a SyntaxError); parameter names are restored from the attribute
    assignments in each body.
    """

    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    """Configuration for the OwlViT vision encoder (a ViT backbone)."""

    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    """Composite configuration holding text + vision sub-configs."""

    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """Build an OwlViTConfig from separate text/vision config dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    """ONNX export description for OwlViT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        # Merge tokenizer-side and image-processor-side dummy inputs.
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
285
"""Download real regularization images from the LAION index for a class prompt."""
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Fetch at least ``num_class_images`` images matching ``class_prompt``.

    Writes the images to ``{class_data_dir}/images`` and logs captions, URLs
    and file paths to sidecar text files.

    NOTE(review): every local in the original was bound to the same mangled
    name (so e.g. ``img`` and ``class_images`` were undefined at their use
    sites); local names are restored from the data flow.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Nothing to do if a previous run already downloaded enough images.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size geometrically until the index returns enough hits
    # (capped at 10k requested images).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate the payload decodes as an image before keeping it.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa2.write(images["url"] + "\n")
                    fa3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any URL that fails or doesn't decode.
                continue
    return


def parse_args():
    """CLI arguments for the retrieval script."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
285
1
def hamming(n_element: int) -> list:
    """Return the first ``n_element`` Hamming numbers (of the form 2^i * 3^j * 5^k).

    Uses the classic three-pointer dynamic algorithm: each pointer tracks the
    smallest element of the list whose multiple (by 2, 3 or 5) has not yet
    been emitted.

    :param n_element: how many terms to generate (must be >= 1)
    :raises ValueError: if ``n_element`` is less than 1
    >>> hamming(5)
    [1, 2, 3, 4, 5]

    NOTE(review): the original defined this function as ``snake_case_`` but
    the ``__main__`` block below calls ``hamming`` — a guaranteed NameError;
    the name is restored from that call site.
    """
    n_element = int(n_element)
    if n_element < 1:
        # Raise directly instead of building the exception in a separate step.
        raise ValueError("a should be a positive number")
    hamming_list = [1]
    i, j, k = 0, 0, 0  # pointers to the next multiple of 2, 3 and 5
    index = 1
    while index < n_element:
        # Advance each pointer past multiples already present.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
139
"""Deprecated feature-extractor alias for GLPN."""
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated alias for ``GLPNImageProcessor`` (removal slated for v5).

    NOTE(review): the original subclassed the undefined name ``a_`` and
    passed the mangled name ``lowerCAmelCase`` as the warning category; per
    the import above and the deprecation message, the intended base is
    ``GLPNImageProcessor`` and the category ``FutureWarning``.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Warn once at construction, then behave exactly like the new class.
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
139
1
"""Recall metric for the `datasets` library, backed by scikit-learn.

NOTE(review): the original assigned all three docstring constants to the same
mangled name ``lowerCamelCase__`` while the decorator below referenced the
undefined ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` (a guaranteed
NameError), and named both methods ``__snake_case``; names are restored to
the ``datasets.Metric`` contract (``_info`` / ``_compute``).
"""
from sklearn.metrics import recall_score

import datasets


_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive.
It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""


_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
    - `0`: If there is a zero division, the return value is `0`.
    - `1`: If there is a zero division, the return value is `1`.

Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.

Examples:

    Example 1-A simple example with some errors
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
        >>> print(results)
        {'recall': 0.6666666666666666}

    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
        >>> print(results)
        {'recall': 0.5}

    Example 3-The same example as Example 1, but with `sample_weight` included.
        >>> recall_metric = datasets.load_metric('recall')
        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
        >>> print(results)
        {'recall': 0.55}

    Example 4-A multiclass example, using different averages.
        >>> recall_metric = datasets.load_metric('recall')
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'recall': array([1., 0., 0.])}
"""


_CITATION = """
@article{scikit-learn,
 title={Scikit-learn: Machine Learning in {P}ython},
 author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
 journal={Journal of Machine Learning Research},
 volume={12},
 pages={2825--2830},
 year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # Multilabel inputs are sequences of label ids per example.
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        # sklearn expects (y_true, y_pred) order, i.e. references first.
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        # Scalar scores come back as 0-d numpy values; unwrap them to float.
        return {"recall": float(score) if score.size == 1 else score}
246
"""Utilities to derive canonical file names for dataset splits.

NOTE(review): the original defined all five helpers under the single name
``UpperCamelCase`` with duplicate parameter names (a SyntaxError), while the
bodies called ``camelcase_to_snakecase`` / ``filename_prefix_for_name`` /
``filename_prefix_for_split`` that were never defined; names restored from
those call sites.
"""
# Lint as: python3
import itertools
import os
import re


# "ABBRWord" -> "ABBR_Word" (uppercase run followed by a capitalized word).
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
# "aB" / "1B" -> "a_B" (lowercase letter or digit followed by an uppercase letter).
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
# A single underscore that is not part of a longer underscore run.
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
# Runs of two or more underscores.
_multiple_underscores_re = re.compile(r"(_{2,})")

# Valid split names: word chars, optionally dot-separated.
_split_re = r"^\w+(\.\w+)*$"

# Characters that are invalid in Windows paths — presumably used by callers
# elsewhere for path sanitization; verify against the rest of the package.
_INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert CamelCase to snake_case (e.g. "SomeName" -> "some_name")."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake_case to CamelCase (e.g. "some_name" -> "SomeName")."""
    parts = _single_underscore_re.split(name)
    parts = [_multiple_underscores_re.split(n) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts) if n != "")


def filename_prefix_for_name(name):
    """Return the snake_case file prefix for a bare dataset name."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    """Return the file prefix "<snake_name>-<split>" for a dataset split."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching every shard file of a split."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the concrete shard file names for a split.

    With ``shard_lengths`` given, one name per shard is produced using the
    "-%05d-of-%05d" convention; otherwise a single unsharded name is returned.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
246
1
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class a ( lowerCAmelCase_ ): _snake_case : Union[List[PIL.Image.Image], np.ndarray] _snake_case : Optional[List[bool]] _snake_case : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
30
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json", "Salesforce/blip-vqa-capfit-large": ( "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json" ), "Salesforce/blip-image-captioning-base": ( "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json" ), "Salesforce/blip-image-captioning-large": ( "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json" ), "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json", "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json", "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json", "Salesforce/blip-itm-large-flikr": ( "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json" ), } class UpperCamelCase__( __A ): lowerCAmelCase__ : int = 'blip_text_model' def __init__( self ,__UpperCAmelCase=3_05_24 ,__UpperCAmelCase=7_68 ,__UpperCAmelCase=7_68 ,__UpperCAmelCase=30_72 ,__UpperCAmelCase=7_68 ,__UpperCAmelCase=12 ,__UpperCAmelCase=8 ,__UpperCAmelCase=5_12 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=1e-12 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=3_05_22 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0 ,__UpperCAmelCase=1_02 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,**__UpperCAmelCase ,) -> Tuple: super().__init__( pad_token_id=__UpperCAmelCase ,bos_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase ,sep_token_id=__UpperCAmelCase ,**__UpperCAmelCase ,) A__ = vocab_size A__ = hidden_size A__ = encoder_hidden_size A__ = intermediate_size 
A__ = projection_dim A__ = hidden_dropout_prob A__ = num_hidden_layers A__ = num_attention_heads A__ = max_position_embeddings A__ = layer_norm_eps A__ = hidden_act A__ = initializer_range A__ = attention_probs_dropout_prob A__ = is_decoder A__ = use_cache @classmethod def snake_case__ ( cls ,__UpperCAmelCase ,**__UpperCAmelCase ) -> "PretrainedConfig": cls._set_token_in_kwargs(__UpperCAmelCase ) A__ , A__ = cls.get_config_dict(__UpperCAmelCase ,**__UpperCAmelCase ) # get the text config dict if we are loading from BlipConfig if config_dict.get('model_type' ) == "blip": A__ = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__UpperCAmelCase ,**__UpperCAmelCase ) class UpperCamelCase__( __A ): lowerCAmelCase__ : str = 'blip_vision_model' def __init__( self ,__UpperCAmelCase=7_68 ,__UpperCAmelCase=30_72 ,__UpperCAmelCase=5_12 ,__UpperCAmelCase=12 ,__UpperCAmelCase=12 ,__UpperCAmelCase=3_84 ,__UpperCAmelCase=16 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=1e-5 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=1e-10 ,**__UpperCAmelCase ,) -> Any: super().__init__(**__UpperCAmelCase ) A__ = hidden_size A__ = intermediate_size A__ = projection_dim A__ = num_hidden_layers A__ = num_attention_heads A__ = patch_size A__ = image_size A__ = initializer_range A__ = attention_dropout A__ = layer_norm_eps A__ = hidden_act @classmethod def snake_case__ ( cls ,__UpperCAmelCase ,**__UpperCAmelCase ) -> "PretrainedConfig": cls._set_token_in_kwargs(__UpperCAmelCase ) A__ , A__ = cls.get_config_dict(__UpperCAmelCase ,**__UpperCAmelCase ) # get the vision config dict if we are loading from BlipConfig if config_dict.get('model_type' ) == "blip": A__ = 
config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__UpperCAmelCase ,**__UpperCAmelCase ) class UpperCamelCase__( __A ): lowerCAmelCase__ : List[Any] = 'blip' lowerCAmelCase__ : int = True def __init__( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=5_12 ,__UpperCAmelCase=2.6_5_9_2 ,__UpperCAmelCase=2_56 ,**__UpperCAmelCase ,) -> Dict: super().__init__(**__UpperCAmelCase ) if text_config is None: A__ = {} logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' ) if vision_config is None: A__ = {} logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' ) A__ = BlipTextConfig(**__UpperCAmelCase ) A__ = BlipVisionConfig(**__UpperCAmelCase ) A__ = self.vision_config.hidden_size A__ = projection_dim A__ = logit_scale_init_value A__ = 1.0 A__ = 0.0_2 A__ = image_text_hidden_size @classmethod def snake_case__ ( cls ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**__UpperCAmelCase ) def snake_case__ ( self ) -> List[str]: A__ = copy.deepcopy(self.__dict__ ) A__ = self.text_config.to_dict() A__ = self.vision_config.to_dict() A__ = self.__class__.model_type return output
221
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json", } class UpperCamelCase__( __A ): lowerCAmelCase__ : Union[str, Any] = 'transfo-xl' lowerCAmelCase__ : Any = ['mems'] lowerCAmelCase__ : Tuple = { 'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self ,__UpperCAmelCase=26_77_35 ,__UpperCAmelCase=[2_00_00, 4_00_00, 20_00_00] ,__UpperCAmelCase=10_24 ,__UpperCAmelCase=10_24 ,__UpperCAmelCase=16 ,__UpperCAmelCase=64 ,__UpperCAmelCase=40_96 ,__UpperCAmelCase=4 ,__UpperCAmelCase=False ,__UpperCAmelCase=18 ,__UpperCAmelCase=16_00 ,__UpperCAmelCase=10_00 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=0 ,__UpperCAmelCase=-1 ,__UpperCAmelCase=True ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=True ,__UpperCAmelCase="normal" ,__UpperCAmelCase=0.0_1 ,__UpperCAmelCase=0.0_1 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1e-5 ,__UpperCAmelCase=0 ,**__UpperCAmelCase ,) -> Tuple: A__ = vocab_size A__ = [] self.cutoffs.extend(__UpperCAmelCase ) if proj_share_all_but_first: A__ = [False] + [True] * len(self.cutoffs ) else: A__ = [False] + [False] * len(self.cutoffs ) A__ = d_model A__ = d_embed A__ = d_head A__ = d_inner A__ = div_val A__ = pre_lnorm A__ = n_layer A__ = n_head A__ = mem_len A__ = same_length A__ = attn_type A__ = clamp_len A__ = sample_softmax A__ = adaptive A__ = dropout A__ = dropatt A__ = untie_r A__ = init A__ = init_range A__ = proj_init_std A__ = init_std A__ = layer_norm_epsilon super().__init__(eos_token_id=__UpperCAmelCase ,**__UpperCAmelCase ) @property def snake_case__ ( self ) -> Optional[Any]: # Message copied from Transformer-XL documentation logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length 
limit.''' ) return -1 @max_position_embeddings.setter def snake_case__ ( self ,__UpperCAmelCase ) -> int: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
221
1
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def UpperCAmelCase__ ( *snake_case__ , **snake_case__ ) -> List[Any]: '''simple docstring''' pass @is_pipeline_test @require_torch @require_vision class __snake_case ( unittest.TestCase ): __lowerCamelCase : Optional[int] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Any: '''simple docstring''' UpperCAmelCase : List[str] =pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' ) UpperCAmelCase : Union[str, Any] =[ { '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Dict: '''simple docstring''' UpperCAmelCase : Any =vqa_pipeline(snake_case__ , top_k=1 ) self.assertEqual( snake_case__ , [ [{'''score''': ANY(snake_case__ ), '''answer''': ANY(snake_case__ )}], [{'''score''': ANY(snake_case__ ), '''answer''': ANY(snake_case__ )}], ] , ) @require_torch def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Optional[Any] =pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' ) UpperCAmelCase : Union[str, Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase : List[str] ='''How many cats are there?''' UpperCAmelCase : Any =vqa_pipeline(image=snake_case__ , 
question='''How many cats are there?''' , top_k=2 ) self.assertEqual( snake_case__ , [{'''score''': ANY(snake_case__ ), '''answer''': ANY(snake_case__ )}, {'''score''': ANY(snake_case__ ), '''answer''': ANY(snake_case__ )}] ) UpperCAmelCase : List[str] =vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( snake_case__ , [{'''score''': ANY(snake_case__ ), '''answer''': ANY(snake_case__ )}, {'''score''': ANY(snake_case__ ), '''answer''': ANY(snake_case__ )}] ) @slow @require_torch def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase : List[Any] =pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' ) UpperCAmelCase : List[str] ='''./tests/fixtures/tests_samples/COCO/000000039769.png''' UpperCAmelCase : int ='''How many cats are there?''' UpperCAmelCase : Optional[int] =vqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 ) self.assertEqual( nested_simplify(snake_case__ , decimals=4 ) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] ) UpperCAmelCase : Dict =vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(snake_case__ , decimals=4 ) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] ) UpperCAmelCase : str =vqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(snake_case__ , decimals=4 ) , [[{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , ) @require_tf @unittest.skip('''Visual question answering not implemented in TF''' ) def UpperCAmelCase__ ( self ) -> Any: '''simple docstring''' pass
78
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __snake_case = { '''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''LlamaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''LlamaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''LlamaForCausalLM''', '''LlamaModel''', '''LlamaPreTrainedModel''', '''LlamaForSequenceClassification''', ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
78
1
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } __lowerCamelCase = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } __lowerCamelCase = {"""facebook/blenderbot_small-90M""": 5_12} def UpperCamelCase ( __lowerCamelCase : Any ): snake_case : Optional[Any] = set() snake_case : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case : Tuple = char snake_case : int = set(__lowerCamelCase ) return pairs class UpperCAmelCase ( A_ ): A__ : str = VOCAB_FILES_NAMES A__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Optional[int] = ["input_ids", "attention_mask"] def __init__(self : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Dict="__start__" , snake_case__ : Any="__end__" , snake_case__ : Tuple="__unk__" , snake_case__ : Dict="__null__" , **snake_case__ : List[Any] , ) -> List[str]: '''simple docstring''' super().__init__(unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , **snake_case__ ) with open(snake_case__ , encoding="utf-8" ) as vocab_handle: snake_case : Optional[int] = json.load(snake_case__ ) snake_case : Any = {v: k for k, v in self.encoder.items()} with open(snake_case__ , 
encoding="utf-8" ) as merges_handle: snake_case : List[Any] = merges_handle.read().split("\n" )[1:-1] snake_case : int = [tuple(merge.split() ) for merge in merges] snake_case : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) snake_case : Optional[Any] = {} @property def _SCREAMING_SNAKE_CASE (self : str ) -> int: '''simple docstring''' return len(self.encoder ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> Dict: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str ) -> str: '''simple docstring''' if token in self.cache: return self.cache[token] snake_case : Union[str, Any] = re.sub("([.,!?()])" , R" \1" , snake_case__ ) snake_case : int = re.sub("(')" , R" \1 " , snake_case__ ) snake_case : str = re.sub(R"\s{2,}" , " " , snake_case__ ) if "\n" in token: snake_case : List[Any] = token.replace("\n" , " __newln__" ) snake_case : Optional[Any] = token.split(" " ) snake_case : str = [] for token in tokens: if not len(snake_case__ ): continue snake_case : Any = token.lower() snake_case : Any = tuple(snake_case__ ) snake_case : str = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) snake_case : Optional[int] = get_pairs(snake_case__ ) if not pairs: words.append(snake_case__ ) continue while True: snake_case : Optional[int] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break snake_case , snake_case : Tuple = bigram snake_case : Tuple = [] snake_case : str = 0 while i < len(snake_case__ ): try: snake_case : str = word.index(snake_case__ , snake_case__ ) new_word.extend(word[i:j] ) snake_case : Dict = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case : Optional[Any] = tuple(snake_case__ ) snake_case : List[Any] = new_word if 
len(snake_case__ ) == 1: break else: snake_case : Any = get_pairs(snake_case__ ) snake_case : List[str] = "@@ ".join(snake_case__ ) snake_case : Union[str, Any] = word[:-4] snake_case : int = word words.append(snake_case__ ) return " ".join(snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str ) -> List[str]: '''simple docstring''' snake_case : Tuple = [] snake_case : Tuple = re.findall(R"\S+\n?" , snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str ) -> int: '''simple docstring''' snake_case : Tuple = token.lower() return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : int ) -> str: '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : List[str] ) -> str: '''simple docstring''' snake_case : str = " ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case : int = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) snake_case : List[Any] = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(snake_case__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" ) snake_case : Tuple = 0 with open(snake_case__ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : 
kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) snake_case : List[Any] = token_index writer.write(" ".join(snake_case__ ) + "\n" ) index += 1 return vocab_file, merge_file
59
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { """tensor(bool)""": np.bool_, """tensor(int8)""": np.inta, """tensor(uint8)""": np.uinta, """tensor(int16)""": np.intaa, """tensor(uint16)""": np.uintaa, """tensor(int32)""": np.intaa, """tensor(uint32)""": np.uintaa, """tensor(int64)""": np.intaa, """tensor(uint64)""": np.uintaa, """tensor(float16)""": np.floataa, """tensor(float)""": np.floataa, """tensor(double)""": np.floataa, } class UpperCAmelCase : def __init__(self : Optional[Any] , snake_case__ : Optional[Any]=None , **snake_case__ : Optional[Any] ) -> List[str]: '''simple docstring''' logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." ) snake_case : Optional[Any] = model snake_case : Dict = kwargs.get("model_save_dir" , snake_case__ ) snake_case : int = kwargs.get("latest_model_name" , snake_case__ ) def __call__(self : Tuple , **snake_case__ : str ) -> List[str]: '''simple docstring''' snake_case : Union[str, Any] = {k: np.array(snake_case__ ) for k, v in kwargs.items()} return self.model.run(snake_case__ , snake_case__ ) @staticmethod def _SCREAMING_SNAKE_CASE (snake_case__ : Union[str, Path] , snake_case__ : Optional[int]=None , snake_case__ : Optional[int]=None ) -> Any: '''simple docstring''' if provider is None: logger.info("No onnxruntime provider specified, using CPUExecutionProvider" ) snake_case : Optional[int] = "CPUExecutionProvider" return ort.InferenceSession(snake_case__ , providers=[provider] , sess_options=snake_case__ ) def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Union[str, Path] , snake_case__ : Optional[str] = None , **snake_case__ : Any ) -> List[Any]: '''simple docstring''' 
snake_case : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME snake_case : Any = self.model_save_dir.joinpath(self.latest_model_name ) snake_case : str = Path(snake_case__ ).joinpath(snake_case__ ) try: shutil.copyfile(snake_case__ , snake_case__ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) snake_case : List[str] = self.model_save_dir.joinpath(snake_case__ ) if src_path.exists(): snake_case : Tuple = Path(snake_case__ ).joinpath(snake_case__ ) try: shutil.copyfile(snake_case__ , snake_case__ ) except shutil.SameFileError: pass def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Optional[int] , ) -> str: '''simple docstring''' if os.path.isfile(snake_case__ ): logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" ) return os.makedirs(snake_case__ , exist_ok=snake_case__ ) # saving model weights/files self._save_pretrained(snake_case__ , **snake_case__ ) @classmethod def _SCREAMING_SNAKE_CASE (cls : Tuple , snake_case__ : Union[str, Path] , snake_case__ : Optional[Union[bool, str, None]] = None , snake_case__ : Optional[Union[str, None]] = None , snake_case__ : bool = False , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional["ort.SessionOptions"] = None , **snake_case__ : Tuple , ) -> Tuple: '''simple docstring''' snake_case : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(snake_case__ ): snake_case : Any = OnnxRuntimeModel.load_model( os.path.join(snake_case__ , snake_case__ ) , provider=snake_case__ , sess_options=snake_case__ ) snake_case : Union[str, Any] = Path(snake_case__ ) # load model from hub else: # download model snake_case : Dict = hf_hub_download( repo_id=snake_case__ , filename=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , 
cache_dir=snake_case__ , force_download=snake_case__ , ) snake_case : List[Any] = Path(snake_case__ ).parent snake_case : Union[str, Any] = Path(snake_case__ ).name snake_case : Dict = OnnxRuntimeModel.load_model(snake_case__ , provider=snake_case__ , sess_options=snake_case__ ) return cls(model=snake_case__ , **snake_case__ ) @classmethod def _SCREAMING_SNAKE_CASE (cls : Optional[Any] , snake_case__ : Union[str, Path] , snake_case__ : bool = True , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , **snake_case__ : Dict , ) -> Union[str, Any]: '''simple docstring''' snake_case : Dict = None if len(str(snake_case__ ).split("@" ) ) == 2: snake_case , snake_case : int = model_id.split("@" ) return cls._from_pretrained( model_id=snake_case__ , revision=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , use_auth_token=snake_case__ , **snake_case__ , )
59
1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class lowercase__ : def __init__( self : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]=13 , UpperCAmelCase_ : str=7 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Tuple=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Any=1000 , ): SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_mask SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = 
hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = scope SCREAMING_SNAKE_CASE__ = range_bbox def A_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE__ = bbox[i, j, 3] SCREAMING_SNAKE_CASE__ = bbox[i, j, 1] SCREAMING_SNAKE_CASE__ = t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE__ = bbox[i, j, 2] SCREAMING_SNAKE_CASE__ = bbox[i, j, 0] SCREAMING_SNAKE_CASE__ = t SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(_lowercase ) SCREAMING_SNAKE_CASE__ = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE__ = TFLayoutLMModel(config=_lowercase ) SCREAMING_SNAKE_CASE__ = model(_lowercase , _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) SCREAMING_SNAKE_CASE__ = model(_lowercase , _lowercase , token_type_ids=_lowercase ) SCREAMING_SNAKE_CASE__ = model(_lowercase , _lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A_ ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE__ = TFLayoutLMForMaskedLM(config=_lowercase ) SCREAMING_SNAKE_CASE__ = model(_lowercase , _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ 
: int , UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = TFLayoutLMForSequenceClassification(config=_lowercase ) SCREAMING_SNAKE_CASE__ = model(_lowercase , _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = TFLayoutLMForTokenClassification(config=_lowercase ) SCREAMING_SNAKE_CASE__ = model(_lowercase , _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ): SCREAMING_SNAKE_CASE__ = TFLayoutLMForQuestionAnswering(config=_lowercase ) SCREAMING_SNAKE_CASE__ = model(_lowercase , _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self : int ): SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': 
input_mask, } return config, inputs_dict @require_tf class lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A__ : List[str] =( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) A__ : List[str] =( { """feature-extraction""": TFLayoutLMModel, """fill-mask""": TFLayoutLMForMaskedLM, """text-classification""": TFLayoutLMForSequenceClassification, """token-classification""": TFLayoutLMForTokenClassification, """zero-shot""": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) A__ : str =False A__ : Union[str, Any] =True A__ : Tuple =1_0 def A_ ( self : List[Any] ): SCREAMING_SNAKE_CASE__ = TFLayoutLMModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=_lowercase , hidden_size=37 ) def A_ ( self : List[str] ): self.config_tester.run_common_tests() def A_ ( self : List[Any] ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowercase ) def A_ ( self : int ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowercase ) def A_ ( self : str ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowercase ) def A_ ( self : Tuple ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowercase ) def A_ ( self : int ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowercase ) @slow def A_ ( self : Optional[int] ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = TFLayoutLMModel.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) @unittest.skip('Onnx 
compliancy broke with TF 2.10' ) def A_ ( self : Optional[Any] ): pass def _lowercase ( ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class lowercase__ ( unittest.TestCase ): @slow def A_ ( self : Any ): SCREAMING_SNAKE_CASE__ = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = prepare_layoutlm_batch_inputs() # forward pass SCREAMING_SNAKE_CASE__ = model(input_ids=_lowercase , bbox=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) # test the sequence output on [0, :3, :3] SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowercase , atol=1e-3 ) ) # test the pooled output on [1, :3] SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowercase , atol=1e-3 ) ) @slow def A_ ( self : int ): # initialize model with randomly initialized sequence classification head SCREAMING_SNAKE_CASE__ = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = prepare_layoutlm_batch_inputs() # forward pass SCREAMING_SNAKE_CASE__ = model( input_ids=_lowercase , bbox=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar SCREAMING_SNAKE_CASE__ = outputs.loss SCREAMING_SNAKE_CASE__ = (2,) self.assertEqual(loss.shape , 
_lowercase ) # test the shape of the logits SCREAMING_SNAKE_CASE__ = outputs.logits SCREAMING_SNAKE_CASE__ = (2, 2) self.assertEqual(logits.shape , _lowercase ) @slow def A_ ( self : Union[str, Any] ): # initialize model with randomly initialized token classification head SCREAMING_SNAKE_CASE__ = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = prepare_layoutlm_batch_inputs() # forward pass SCREAMING_SNAKE_CASE__ = model( input_ids=_lowercase , bbox=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) # test the shape of the logits SCREAMING_SNAKE_CASE__ = outputs.logits SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _lowercase ) @slow def A_ ( self : Union[str, Any] ): # initialize model with randomly initialized token classification head SCREAMING_SNAKE_CASE__ = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = prepare_layoutlm_batch_inputs() # forward pass SCREAMING_SNAKE_CASE__ = model(input_ids=_lowercase , bbox=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) # test the shape of the logits SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _lowercase ) self.assertEqual(outputs.end_logits.shape , _lowercase )
366
import math

# Explicit export list: `_lowercase` is underscore-prefixed and would otherwise
# be hidden from `from module import *`.
__all__ = ["_lowercase"]


def _lowercase(initial_intensity: float, angle: float) -> float:
    """Compute the transmitted intensity through a polarizer via Malus's law.

    Malus's law: I = I0 * cos^2(theta), where I0 is the incident intensity and
    theta is the angle between the light's polarization and the polarizer axis.

    Args:
        initial_intensity: incident light intensity (must be non-negative).
        angle: polarizer angle in degrees, in the range [0, 360].

    Returns:
        The transmitted intensity.

    Raises:
        ValueError: if ``initial_intensity`` is negative or ``angle`` is
            outside [0, 360].

    >>> _lowercase(100.0, 0)
    100.0
    >>> round(_lowercase(100.0, 60), 5)
    25.0
    >>> round(_lowercase(100.0, 90), 7)
    0.0
    """
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
        # handling of values out of allowed range
    # cos() expects radians; the public contract takes degrees.
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
169
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None ) -> List[Any]: __lowerCamelCase : str = np.random.default_rng(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = length __lowerCamelCase : int = rng.normal(size=(length,) ).astype(np.floataa ) __lowerCamelCase : Tuple = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> Union[str, Any]: return self.length def __getitem__( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: return {"x": self.x[i], "y": self.y[i]} class UpperCAmelCase_ (torch.nn.Module ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=False ) -> Dict: super().__init__() __lowerCamelCase : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __lowerCamelCase : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __lowerCamelCase : Dict = True def lowercase_ ( self , SCREAMING_SNAKE_CASE_=None ) -> List[Any]: if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __lowerCamelCase : List[Any] = False return x * self.a[0] + self.b[0] class UpperCAmelCase_ (torch.nn.Module ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=False ) -> Dict: super().__init__() __lowerCamelCase : Union[str, Any] = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ).float() ) __lowerCamelCase : Any = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ).float() ) __lowerCamelCase : List[Any] = True def lowercase_ ( self , SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]: if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}' ) __lowerCamelCase : Any = False return x * self.a + self.b def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int = 16 ) -> str: from datasets import load_dataset from transformers import AutoTokenizer __lowerCamelCase : Dict = AutoTokenizer.from_pretrained('bert-base-cased' ) __lowerCamelCase : List[str] = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'} __lowerCamelCase : Dict = load_dataset('csv' , data_files=UpperCAmelCase_ ) __lowerCamelCase : Optional[Any] = datasets['train'].unique('label' ) __lowerCamelCase : List[str] = {v: i for i, v in enumerate(UpperCAmelCase_ )} def tokenize_function(UpperCAmelCase_ : str ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase : int = tokenizer( examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' ) if "label" in examples: __lowerCamelCase : Optional[int] = [label_to_id[l] for l in examples['label']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __lowerCamelCase : str = datasets.map( UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=['sentence1', 'sentence2', 'label'] , ) def collate_fn(UpperCAmelCase_ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCAmelCase_ , padding='max_length' , max_length=1_28 , return_tensors='pt' ) return tokenizer.pad(UpperCAmelCase_ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
__lowerCamelCase : List[str] = DataLoader(tokenized_datasets['train'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=2 ) __lowerCamelCase : Union[str, Any] = DataLoader(tokenized_datasets['validation'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=1 ) return train_dataloader, eval_dataloader
185
'''simple docstring''' from importlib import import_module from .logging import get_logger A__ : str = get_logger(__name__) class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[Any]: __lowerCamelCase : List[str] = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('__' ): setattr(self , SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __lowerCamelCase : Optional[int] = module._original_module if isinstance(SCREAMING_SNAKE_CASE_ , _PatchedModuleObj ) else module class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Tuple = [] def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]: __lowerCamelCase : Optional[int] = obj __lowerCamelCase : List[Any] = target __lowerCamelCase : Union[str, Any] = new __lowerCamelCase : Union[str, Any] = target.split('.' )[0] __lowerCamelCase : Dict = {} __lowerCamelCase : Dict = attrs or [] def __enter__( self ) -> Optional[int]: *__lowerCamelCase , __lowerCamelCase : int = self.target.split('.' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(SCREAMING_SNAKE_CASE_ ) ): try: __lowerCamelCase : Optional[int] = import_module('.'.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __lowerCamelCase : List[Any] = getattr(self.obj , SCREAMING_SNAKE_CASE_ ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". 
if obj_attr is submodule or ( (isinstance(SCREAMING_SNAKE_CASE_ , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __lowerCamelCase : Optional[Any] = obj_attr # patch at top level setattr(self.obj , SCREAMING_SNAKE_CASE_ , _PatchedModuleObj(SCREAMING_SNAKE_CASE_ , attrs=self.attrs ) ) __lowerCamelCase : str = getattr(self.obj , SCREAMING_SNAKE_CASE_ ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , _PatchedModuleObj(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , attrs=self.attrs ) ) __lowerCamelCase : List[Any] = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # finally set the target attribute setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __lowerCamelCase : Union[str, Any] = getattr(import_module('.'.join(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , SCREAMING_SNAKE_CASE_ ) is attr_value: __lowerCamelCase : Optional[int] = getattr(self.obj , SCREAMING_SNAKE_CASE_ ) setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __lowerCamelCase : List[Any] = globals()['__builtins__'][target_attr] setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.new ) else: raise RuntimeError(f'Tried to patch attribute {target_attr} instead of a submodule.' ) def __exit__( self , *SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: for attr in list(self.original ): setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.original.pop(SCREAMING_SNAKE_CASE_ ) ) def lowercase_ ( self ) -> Optional[int]: self.__enter__() self._active_patches.append(self ) def lowercase_ ( self ) -> str: try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
185
1
import importlib
import inspect
import os
import re

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# NOTE: raw string — the plain string used previously contained invalid escape
# sequences such as '\[' (a SyntaxWarning on recent Python versions).
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

# Config classes exempted from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    'CLIPConfigMixin',
    'DecisionTransformerConfigMixin',
    'EncoderDecoderConfigMixin',
    'RagConfigMixin',
    'SpeechEncoderDecoderConfigMixin',
    'VisionEncoderDecoderConfigMixin',
    'VisionTextDualEncoderConfigMixin',
}


def lowerCAmelCase_() -> None:
    """Verify every config class docstring references a valid checkpoint.

    A checkpoint reference is a markdown link `[name](https://huggingface.co/name)`
    where the link target matches the checkpoint name.

    Raises:
        ValueError: listing every config class (not in the ignore set) whose
            docstring contains no self-consistent checkpoint link.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    # The guard previously called an undefined name; invoke the checker defined above.
    lowerCAmelCase_()
247
import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'): UpperCAmelCase_ = True from torch.cuda.amp import autocast UpperCAmelCase_ = logging.getLogger(__name__) @dataclass class lowercase__ : '''simple docstring''' a : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) a : Optional[str] = field( default=__lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) a : Optional[bool] = field( default=__lowerCamelCase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) a : Optional[bool] = field( default=__lowerCamelCase , metadata={"help": "Whether to log verbose messages or not."} , ) a : Optional[float] = field( default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} ) a : Optional[float] = field( default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} ) a : Optional[float] = field( default=0.9_9_9_9_9_5 , metadata={"help": "Decay of gumbel temperature during training."} ) def lowerCAmelCase_ ( __UpperCAmelCase: ModelArguments , __UpperCAmelCase: TrainingArguments ) -> Any: logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) UpperCamelCase__ : Tuple = logging.WARNING if 
model_args.verbose_logging: UpperCamelCase__ : List[Any] = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): UpperCamelCase__ : Dict = logging.INFO logger.setLevel(__UpperCAmelCase ) @dataclass class lowercase__ : '''simple docstring''' a : str = field( default=__lowerCamelCase , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) a : Optional[str] = field( default=__lowerCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) a : Optional[str] = field( default="train" , metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" } , ) a : Optional[str] = field( default="validation" , metadata={ "help": ( "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" ) } , ) a : Optional[str] = field( default="file" , metadata={"help": "Column in the dataset that contains speech file path. 
Defaults to 'file'"} , ) a : bool = field( default=__lowerCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) a : Optional[int] = field( default=1 , metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" } , ) a : Optional[int] = field( default=__lowerCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , ) a : Optional[float] = field( default=2_0.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} ) @dataclass class lowercase__ : '''simple docstring''' a : WavaVecaForPreTraining a : WavaVecaFeatureExtractor a : Union[bool, str] = "longest" a : Optional[int] = None a : Optional[int] = None def __call__( self, __magic_name__ ) -> Dict[str, torch.Tensor]: """simple docstring""" # reformat list to dict and set to pytorch format UpperCamelCase__ : List[Any] = self.feature_extractor.pad( __magic_name__, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='''pt''', ) UpperCamelCase__ : Dict = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] ) UpperCamelCase__ : Union[str, Any] = batch['''input_values'''].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula UpperCamelCase__ : List[str] = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to( torch.long ) UpperCamelCase__ : Dict = torch.zeros( (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['''input_values'''].device ) # these two operations makes sure that all values # before the output lengths indices are attended to UpperCamelCase__ : str = 1 UpperCamelCase__ : Union[str, Any] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices UpperCamelCase__ : Dict = 
_compute_mask_indices( (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__magic_name__, min_masks=2, ) return batch class lowercase__ ( __lowerCamelCase ): '''simple docstring''' def __init__( self, *__magic_name__, __magic_name__=1, __magic_name__=0, __magic_name__=1.0, **__magic_name__ ) -> Dict: """simple docstring""" super().__init__(*__magic_name__, **__magic_name__ ) UpperCamelCase__ : Any = 0 UpperCamelCase__ : List[Any] = max_gumbel_temp UpperCamelCase__ : List[str] = min_gumbel_temp UpperCamelCase__ : Any = gumbel_temp_decay def UpperCamelCase__ ( self, __magic_name__, __magic_name__ ) -> torch.Tensor: """simple docstring""" model.train() UpperCamelCase__ : str = self._prepare_inputs(__magic_name__ ) if self.use_amp: with autocast(): UpperCamelCase__ : Optional[Any] = self.compute_loss(__magic_name__, __magic_name__ ) else: UpperCamelCase__ : Tuple = self.compute_loss(__magic_name__, __magic_name__ ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": UpperCamelCase__ : Any = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": UpperCamelCase__ : List[str] = loss.sum() / (inputs['''mask_time_indices''']).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. 
Choose one of ['mean', 'sum']" ) if self.args.gradient_accumulation_steps > 1: UpperCamelCase__ : Tuple = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(__magic_name__ ).backward() elif self.use_apex: with amp.scale_loss(__magic_name__, self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(__magic_name__ ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) ) return loss.detach() def lowerCAmelCase_ ( ) -> str: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : str = parser.parse_args_into_dataclasses() configure_logger(__UpperCAmelCase , __UpperCAmelCase ) # Downloading and loading a dataset from the hub. 
UpperCamelCase__ : str = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" UpperCamelCase__ : Any = DatasetDict() UpperCamelCase__ : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , ) UpperCamelCase__ : Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" UpperCamelCase__ : int = DatasetDict() UpperCamelCase__ : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , ) UpperCamelCase__ : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported UpperCamelCase__ : str = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__UpperCAmelCase ) def prepare_dataset(__UpperCAmelCase: Union[str, Any] ): # check that all files have the correct sampling rate UpperCamelCase__ ,UpperCamelCase__ : List[str] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays UpperCamelCase__ : Any = datasets.map( __UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names ) # filter audio files that are too long UpperCamelCase__ : Tuple = vectorized_datasets.filter( lambda __UpperCAmelCase : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * 
feature_extractor.sampling_rate ) ) def normalize(__UpperCAmelCase: Optional[int] ): return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` UpperCamelCase__ : Any = vectorized_datasets.map( __UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 UpperCamelCase__ : int = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and''' ''' ``config.feat_extract_norm=\'layer\'''' ) UpperCamelCase__ : Optional[int] = WavaVecaForPreTraining(__UpperCAmelCase ) UpperCamelCase__ : List[str] = DataCollatorForWavaVecaPretraining(model=__UpperCAmelCase , feature_extractor=__UpperCAmelCase ) UpperCamelCase__ : List[Any] = WavaVecaPreTrainer( model=__UpperCAmelCase , data_collator=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=__UpperCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
247
1
def UpperCAmelCase ( a_ ) -> int: """simple docstring""" __A = len(a_ ) __A = len(matrix[0] ) __A = min(a_ , a_ ) for row in range(a_ ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 , a_ ): __A = matrix[col][row] / matrix[row][row] for i in range(a_ , a_ ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows __A = True for i in range(row + 1 , a_ ): if matrix[i][row] != 0: __A , __A = matrix[i], matrix[row] __A = False break if reduce: rank -= 1 for i in range(a_ ): __A = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
15
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE :int = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE :Union[str, Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } SCREAMING_SNAKE_CASE :int = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] snake_case_ = [] def __init__( self : Any ,A : List[str] ,A : str="<unk>" ,A : int="<s>" ,A : Union[str, Any]="</s>" ,A : List[str]="<pad>" ,A : int="[SEP]" ,A : Optional[Any]="[MASK]" ,A : Tuple="[CLS]" ,A : Optional[Dict[str, Any]] = None ,**A : Any ,): __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it __A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token __A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sep_token=A ,mask_token=A ,cls_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,) __A = vocab_file __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A ) @property def UpperCamelCase_ ( self : List[str] ): return self.sp_model.get_piece_size() def UpperCamelCase_ ( self : Optional[Any] ): __A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[int] ): __A = self.__dict__.copy() __A = None return state def __setstate__( self : str ,A : Optional[Any] ): __A = d # for backward compatibility if not hasattr(self ,"sp_model_kwargs" ): __A = {} __A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase_ ( self : Any ,A : str ): return self.sp_model.encode(A ,out_type=A ) def UpperCamelCase_ ( self : List[str] ,A : Tuple ): return self.sp_model.piece_to_id(A ) def UpperCamelCase_ ( self : List[Any] ,A : Tuple ): __A = self.sp_model.IdToPiece(A ) return token def UpperCamelCase_ ( self : List[Any] ,A : int ): __A = [] __A = "" __A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token __A = True __A = [] else: current_sub_tokens.append(A ) __A = False out_string += self.sp_model.decode(A ) return out_string.strip() def UpperCamelCase_ ( self : Tuple ,A : List[int] ,A : bool = False ,A : bool = None ,A : bool = True ,**A : Union[str, Any] ,): __A = kwargs.pop("use_source_tokenizer" ,A ) __A = self.convert_ids_to_tokens(A ,skip_special_tokens=A ) # To avoid mixing byte-level and unicode 
for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 __A = [] __A = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A ) ) __A = [] sub_texts.append(A ) else: current_sub_text.append(A ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: __A = re.sub(R" (\[(MASK|SEP)\])" ,R"\1" ," ".join(A ) ) else: __A = "".join(A ) __A = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __A = self.clean_up_tokenization(A ) return clean_text else: return text def UpperCamelCase_ ( self : str ,A : str ,A : Optional[str] = None ): if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __A = os.path.join( A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A ) elif not os.path.isfile(self.vocab_file ): with open(A ,"wb" ) as fi: __A = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,) def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __A = [self.cls_token_id] __A = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self : Optional[int] ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A 
,token_ids_a=A ,already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1] def UpperCamelCase_ ( self : Any ,A : List[int] ,A : Optional[List[int]] = None ): __A = [self.sep_token_id] __A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
15
1
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class __A( unittest.TestCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=4 , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_attention_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = 
initializer_range UpperCamelCase__ = num_choices def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_attention_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs UpperCamelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs UpperCamelCase__ = True UpperCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with 
ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase_ (self ): UpperCamelCase__ = FlaxRobertaPreLayerNormModelTester(self ) @slow def UpperCAmelCase_ (self ): for model_class_name in self.all_model_classes: UpperCamelCase__ = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @require_flax class __A( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = [1, 11, 5_02_65] self.assertEqual(list(output.shape ) , SCREAMING_SNAKE_CASE_ ) # compare the actual values for a slice. 
UpperCamelCase__ = np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )[0] # compare the actual values for a slice. UpperCamelCase__ = np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
178
from PIL import Image def __magic_name__ ( __a : Image ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ = image.size UpperCamelCase__ = 0 UpperCamelCase__ = image.load() for i in range(__a ): for j in range(__a ): UpperCamelCase__ = pixels[j, i] mean += pixel mean //= width * height for j in range(__a ): for i in range(__a ): UpperCamelCase__ = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": lowerCamelCase_ = mean_threshold(Image.open('''path_to_image''').convert('''L''')) image.save('''output_image_path''')
178
1
"""simple docstring""" class a__ : def __init__( self : List[Any], lowerCAmelCase : int ) -> List[Any]: lowercase : Tuple = n lowercase : List[str] = [None] * self.n lowercase : Optional[Any] = 0 # index of the first element lowercase : List[Any] = 0 lowercase : Tuple = 0 def __len__( self : Union[str, Any] ) -> int: return self.size def lowercase ( self : Optional[Any] ) -> bool: return self.size == 0 def lowercase ( self : Union[str, Any] ) -> Tuple: return False if self.is_empty() else self.array[self.front] def lowercase ( self : List[str], lowerCAmelCase : List[Any] ) -> str: if self.size >= self.n: raise Exception('QUEUE IS FULL' ) lowercase : List[Any] = data lowercase : Tuple = (self.rear + 1) % self.n self.size += 1 return self def lowercase ( self : Optional[int] ) -> List[str]: if self.size == 0: raise Exception('UNDERFLOW' ) lowercase : List[Any] = self.array[self.front] lowercase : Any = None lowercase : Union[str, Any] = (self.front + 1) % self.n self.size -= 1 return temp
255
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, unittest.TestCase ): _lowerCamelCase = StableDiffusionDiffEditPipeline _lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} _lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} _lowerCamelCase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _lowerCamelCase = frozenset([] ) def lowercase ( self : Any ) -> Dict: torch.manual_seed(0 ) lowercase : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase, ) lowercase : Tuple = DDIMScheduler( beta_start=0.0_0085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=lowerCAmelCase, set_alpha_to_one=lowerCAmelCase, ) lowercase : Any = DDIMInverseScheduler( beta_start=0.0_0085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=lowerCAmelCase, 
set_alpha_to_zero=lowerCAmelCase, ) torch.manual_seed(0 ) lowercase : int = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) lowercase : List[str] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, ) lowercase : str = CLIPTextModel(lowerCAmelCase ) lowercase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowercase : Tuple = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowercase ( self : Tuple, lowerCAmelCase : List[str], lowerCAmelCase : Tuple=0 ) -> Union[str, Any]: lowercase : List[Any] = floats_tensor((1, 16, 16), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) lowercase : Union[str, Any] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) if str(lowerCAmelCase ).startswith('mps' ): lowercase : Optional[Any] = torch.manual_seed(lowerCAmelCase ) else: lowercase : Optional[Any] = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowercase : Tuple = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowercase ( self : Union[str, Any], lowerCAmelCase : Tuple, lowerCAmelCase : Dict=0 ) -> Optional[Any]: lowercase : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) lowercase : List[str] = image.cpu().permute(0, 2, 
3, 1 )[0] lowercase : Optional[int] = Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('RGB' ) if str(lowerCAmelCase ).startswith('mps' ): lowercase : Optional[int] = torch.manual_seed(lowerCAmelCase ) else: lowercase : Optional[Any] = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowercase : List[Any] = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowercase ( self : Optional[int], lowerCAmelCase : Any, lowerCAmelCase : List[str]=0 ) -> Union[str, Any]: lowercase : Optional[int] = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) lowercase : Tuple = image.cpu().permute(0, 2, 3, 1 )[0] lowercase : Tuple = Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('RGB' ) if str(lowerCAmelCase ).startswith('mps' ): lowercase : Optional[int] = torch.manual_seed(lowerCAmelCase ) else: lowercase : List[str] = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowercase : Union[str, Any] = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def lowercase ( self : Optional[int] ) -> str: if not hasattr(self.pipeline_class, '_optional_components' ): return lowercase : Optional[int] = self.get_dummy_components() lowercase : int = self.pipeline_class(**lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) 
lowercase : List[Any] = self.get_dummy_inputs(lowerCAmelCase ) lowercase : Any = pipe(**lowerCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCAmelCase ) lowercase : Any = self.pipeline_class.from_pretrained(lowerCAmelCase ) pipe_loaded.to(lowerCAmelCase ) pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase ) for optional_component in pipe._optional_components: self.assertTrue( getattr(lowerCAmelCase, lowerCAmelCase ) is None, f'''`{optional_component}` did not stay set to None after loading.''', ) lowercase : Tuple = self.get_dummy_inputs(lowerCAmelCase ) lowercase : Optional[Any] = pipe_loaded(**lowerCAmelCase )[0] lowercase : List[Any] = np.abs(output - output_loaded ).max() self.assertLess(lowerCAmelCase, 1e-4 ) def lowercase ( self : Any ) -> str: lowercase : Union[str, Any] = 'cpu' lowercase : Optional[int] = self.get_dummy_components() lowercase : List[str] = self.pipeline_class(**lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowercase : Any = self.get_dummy_mask_inputs(lowerCAmelCase ) lowercase : str = pipe.generate_mask(**lowerCAmelCase ) lowercase : str = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16) ) lowercase : List[str] = np.array([0] * 9 ) lowercase : Dict = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCAmelCase, 1e-3 ) self.assertEqual(mask[0, -3, -4], 0 ) def lowercase ( self : int ) -> str: lowercase : int = 'cpu' lowercase : Dict = self.get_dummy_components() lowercase : Optional[int] = self.pipeline_class(**lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowercase : Any = self.get_dummy_inversion_inputs(lowerCAmelCase ) lowercase : Tuple = pipe.invert(**lowerCAmelCase ).images lowercase : Tuple = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) lowercase : List[Any] = np.array( [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 
0.4407, 0.4799], ) lowercase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCAmelCase, 1e-3 ) def lowercase ( self : str ) -> int: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def lowercase ( self : List[str] ) -> Tuple: lowercase : Dict = 'cpu' lowercase : Any = self.get_dummy_components() lowercase : List[Any] = {'beta_start': 0.0_0085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'} lowercase : List[str] = DPMSolverMultistepScheduler(**lowerCAmelCase ) lowercase : Dict = DPMSolverMultistepInverseScheduler(**lowerCAmelCase ) lowercase : str = self.pipeline_class(**lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowercase : List[Any] = self.get_dummy_inversion_inputs(lowerCAmelCase ) lowercase : int = pipe.invert(**lowerCAmelCase ).images lowercase : Tuple = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) lowercase : Dict = np.array( [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799], ) lowercase : str = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCAmelCase, 1e-3 ) @require_torch_gpu @slow class a__ ( unittest.TestCase ): def lowercase ( self : Optional[Any] ) -> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def lowercase ( cls : Optional[int] ) -> Tuple: lowercase : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) lowercase : Optional[Any] = raw_image.convert('RGB' ).resize((768, 768) ) lowercase : Any = raw_image def lowercase ( self : Optional[Any] ) -> List[Any]: lowercase : str = torch.manual_seed(0 ) lowercase : int = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa ) lowercase : List[str] = DDIMScheduler.from_config(pipe.scheduler.config ) 
lowercase : List[Any] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowercase : List[Any] = 'a bowl of fruit' lowercase : List[Any] = 'a bowl of pears' lowercase : int = pipe.generate_mask( image=self.raw_image, source_prompt=lowerCAmelCase, target_prompt=lowerCAmelCase, generator=lowerCAmelCase, ) lowercase : Tuple = pipe.invert( prompt=lowerCAmelCase, image=self.raw_image, inpaint_strength=0.7, generator=lowerCAmelCase ).latents lowercase : str = pipe( prompt=lowerCAmelCase, mask_image=lowerCAmelCase, image_latents=lowerCAmelCase, generator=lowerCAmelCase, negative_prompt=lowerCAmelCase, inpaint_strength=0.7, output_type='numpy', ).images[0] lowercase : Dict = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def lowercase ( self : Union[str, Any] ) -> List[Any]: lowercase : Dict = torch.manual_seed(0 ) lowercase : Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa ) lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowercase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowercase : Union[str, Any] = 'a bowl of fruit' lowercase : List[Any] = 'a bowl of pears' lowercase : List[Any] = pipe.generate_mask( image=self.raw_image, source_prompt=lowerCAmelCase, target_prompt=lowerCAmelCase, generator=lowerCAmelCase, ) lowercase : List[str] = pipe.invert( prompt=lowerCAmelCase, image=self.raw_image, inpaint_strength=0.7, generator=lowerCAmelCase, num_inference_steps=25, ).latents lowercase : int = pipe( prompt=lowerCAmelCase, 
mask_image=lowerCAmelCase, image_latents=lowerCAmelCase, generator=lowerCAmelCase, negative_prompt=lowerCAmelCase, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0] lowercase : Tuple = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
255
1
'''simple docstring''' import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def __lowerCAmelCase (__lowerCAmelCase ): if is_torch_version("<" , "2.0.0" ) or not hasattr(__lowerCAmelCase , "_dynamo" ): return False return isinstance(__lowerCAmelCase , torch._dynamo.eval_frame.OptimizedModule ) def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = True ): _UpperCAmelCase : Any = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) _UpperCAmelCase : Dict = is_compiled_module(__lowerCAmelCase ) if is_compiled: _UpperCAmelCase : Optional[int] = model _UpperCAmelCase : Any = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase : Any = model.module if not keep_fpaa_wrapper: _UpperCAmelCase : List[Any] = getattr(__lowerCAmelCase , "forward" ) _UpperCAmelCase : Dict = model.__dict__.pop("_original_forward" , __lowerCAmelCase ) if original_forward is not None: while hasattr(__lowerCAmelCase , "__wrapped__" ): _UpperCAmelCase : Optional[int] = forward.__wrapped__ if forward == original_forward: break _UpperCAmelCase : Dict = forward if getattr(__lowerCAmelCase , "_converted_to_transformer_engine" , __lowerCAmelCase ): convert_model(__lowerCAmelCase , to_transformer_engine=__lowerCAmelCase ) if is_compiled: _UpperCAmelCase : int = model _UpperCAmelCase : str = compiled_model return model def __lowerCAmelCase (): PartialState().wait_for_everyone() def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ): if 
PartialState().distributed_type == DistributedType.TPU: xm.save(__lowerCAmelCase , __lowerCAmelCase ) elif PartialState().local_process_index == 0: torch.save(__lowerCAmelCase , __lowerCAmelCase ) @contextmanager def __lowerCAmelCase (**__lowerCAmelCase ): for key, value in kwargs.items(): _UpperCAmelCase : str = str(__lowerCAmelCase ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def __lowerCAmelCase (__lowerCAmelCase ): if not hasattr(__lowerCAmelCase , "__qualname__" ) and not hasattr(__lowerCAmelCase , "__name__" ): _UpperCAmelCase : List[str] = getattr(__lowerCAmelCase , "__class__" , __lowerCAmelCase ) if hasattr(__lowerCAmelCase , "__qualname__" ): return obj.__qualname__ if hasattr(__lowerCAmelCase , "__name__" ): return obj.__name__ return str(__lowerCAmelCase ) def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ): for key, value in source.items(): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase : Any = destination.setdefault(__lowerCAmelCase , {} ) merge_dicts(__lowerCAmelCase , __lowerCAmelCase ) else: _UpperCAmelCase : Optional[int] = value return destination def __lowerCAmelCase (__lowerCAmelCase = None ): if port is None: _UpperCAmelCase : Tuple = 29_500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(("localhost", port) ) == 0
322
'''simple docstring''' import pytest lowerCamelCase__ = '__dummy_dataset1__' lowerCamelCase__ = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def __lowerCAmelCase (): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __lowerCAmelCase (): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase : Optional[Any] = dataset_loading_script_name _UpperCAmelCase : Any = tmp_path / "datasets" / script_name script_dir.mkdir(parents=__lowerCAmelCase ) _UpperCAmelCase : Optional[Any] = script_dir / F"""{script_name}.py""" with open(__lowerCAmelCase , "w" ) as f: f.write(__lowerCAmelCase ) return str(__lowerCAmelCase )
322
1
from collections import namedtuple import requests from lxml import html # type: ignore A__ : Tuple = namedtuple('''covid_data''', '''cases deaths recovered''') def UpperCamelCase( __UpperCamelCase : str = "https://www.worldometers.info/coronavirus/" ): lowerCAmelCase_ : Optional[int] = """//div[@class = \"maincounter-number\"]/span/text()""" return covid_data(*html.fromstring(requests.get(SCREAMING_SNAKE_CASE__ ).content ).xpath(SCREAMING_SNAKE_CASE__ ) ) A__ : Any = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
103
import math def A__ ( SCREAMING_SNAKE_CASE__) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE__) + 1) , 6): if number % i == 0 or number % (i + 2) == 0: return False return True def A__ ( SCREAMING_SNAKE_CASE__ = 1_0001) -> int: try: __snake_case: List[str] = int(SCREAMING_SNAKE_CASE__) except (TypeError, ValueError): raise TypeError("""Parameter nth must be int or castable to int.""") from None if nth <= 0: raise ValueError("""Parameter nth must be greater than or equal to one.""") __snake_case: list[int] = [] __snake_case: List[str] = 2 while len(SCREAMING_SNAKE_CASE__) < nth: if is_prime(SCREAMING_SNAKE_CASE__): primes.append(SCREAMING_SNAKE_CASE__) num += 1 else: num += 1 return primes[len(SCREAMING_SNAKE_CASE__) - 1] if __name__ == "__main__": print(f'{solution() = }')
111
0
import math def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase = 0 , __lowerCAmelCase = 0 ) -> list: UpperCamelCase__ : Dict = end or len(__lowerCAmelCase ) for i in range(__lowerCAmelCase , __lowerCAmelCase ): UpperCamelCase__ : int = i UpperCamelCase__ : Tuple = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: UpperCamelCase__ : Optional[Any] = array[temp_index - 1] temp_index -= 1 UpperCamelCase__ : Optional[int] = temp_index_value return array def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> None: # Max Heap UpperCamelCase__ : Dict = index UpperCamelCase__ : List[Any] = 2 * index + 1 # Left Node UpperCamelCase__ : Any = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: UpperCamelCase__ : Optional[Any] = left_index if right_index < heap_size and array[largest] < array[right_index]: UpperCamelCase__ : Dict = right_index if largest != index: UpperCamelCase__ , UpperCamelCase__ : Any = array[largest], array[index] heapify(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> list: UpperCamelCase__ : Any = len(__lowerCAmelCase ) for i in range(n // 2 , -1 , -1 ): heapify(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for i in range(n - 1 , 0 , -1 ): UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = array[0], array[i] heapify(__lowerCAmelCase , 0 , __lowerCAmelCase ) return array def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int: if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int: 
UpperCamelCase__ : List[Any] = low UpperCamelCase__ : Optional[int] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = array[j], array[i] i += 1 def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> list: if len(__lowerCAmelCase ) == 0: return array UpperCamelCase__ : Optional[int] = 2 * math.ceil(math.loga(len(__lowerCAmelCase ) ) ) UpperCamelCase__ : Tuple = 16 return intro_sort(__lowerCAmelCase , 0 , len(__lowerCAmelCase ) , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list: while end - start > size_threshold: if max_depth == 0: return heap_sort(__lowerCAmelCase ) max_depth -= 1 UpperCamelCase__ : int = median_of_a(__lowerCAmelCase , __lowerCAmelCase , start + ((end - start) // 2) + 1 , end - 1 ) UpperCamelCase__ : Any = partition(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) intro_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) UpperCamelCase__ : str = p return insertion_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase : List[str] =input('''Enter numbers separated by a comma : ''').strip() lowerCamelCase : List[Any] =[float(item) for item in user_input.split(''',''')] print(sort(unsorted))
196
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=A__ ) class __a ( A__ ): _lowerCAmelCase : str = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) _lowerCAmelCase : ClassVar[Features] = Features({'''text''': Value('''string''' )} ) _lowerCAmelCase : ClassVar[Features] = Features({} ) _lowerCAmelCase : str = "text" @property def __lowercase ( self : str ): '''simple docstring''' return {self.text_column: "text"}
196
1
import numpy as np def UpperCamelCase ( snake_case__ : np.ndarray ) -> np.ndarray: return 1 / (1 + np.exp(-vector )) def UpperCamelCase ( snake_case__ : np.ndarray ) -> np.ndarray: return vector * sigmoid(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
119
from __future__ import annotations from typing import Any class lowerCAmelCase_ : def __init__( self, SCREAMING_SNAKE_CASE_ ) -> None: UpperCamelCase : Any = num_of_nodes UpperCamelCase : list[list[int]] = [] UpperCamelCase : dict[int, int] = {} def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None: self.m_edges.append([u_node, v_node, weight] ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int: if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None: if self.m_component[u_node] != u_node: for k in self.m_component: UpperCamelCase : Dict = self.find_component(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None: if component_size[u_node] <= component_size[v_node]: UpperCamelCase : Tuple = v_node component_size[v_node] += component_size[u_node] self.set_component(SCREAMING_SNAKE_CASE_ ) elif component_size[u_node] >= component_size[v_node]: UpperCamelCase : Union[str, Any] = self.find_component(SCREAMING_SNAKE_CASE_ ) component_size[u_node] += component_size[v_node] self.set_component(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ) -> None: UpperCamelCase : int = [] UpperCamelCase : int = 0 UpperCamelCase : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) UpperCamelCase : List[str] = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: UpperCamelCase , UpperCamelCase , UpperCamelCase : List[Any] = edge UpperCamelCase : str = self.m_component[u] UpperCamelCase : Any = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): UpperCamelCase : 
Any = [u, v, w] for edge in minimum_weight_edge: if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase , UpperCamelCase , UpperCamelCase : int = edge UpperCamelCase : List[Any] = self.m_component[u] UpperCamelCase : Tuple = self.m_component[v] if u_component != v_component: mst_weight += w self.union(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 UpperCamelCase : Optional[Any] = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def UpperCamelCase ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
119
1
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = XLNetTokenizer UpperCamelCase = XLNetTokenizerFast UpperCamelCase = True UpperCamelCase = True def _lowerCamelCase ( self : Tuple) -> Any: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase = XLNetTokenizer(A , keep_accents=A) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname) def _lowerCamelCase ( self : Dict) -> Any: """simple docstring""" _UpperCAmelCase = '<s>' _UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A) , A) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A) , A) def _lowerCamelCase ( self : Tuple) -> List[Any]: """simple docstring""" _UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<unk>') self.assertEqual(vocab_keys[1] , '<s>') self.assertEqual(vocab_keys[-1] , '<eod>') self.assertEqual(len(A) , 10_06) def _lowerCamelCase ( self : Union[str, Any]) -> int: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_00) def _lowerCamelCase ( self : Union[str, Any]) -> Tuple: """simple docstring""" _UpperCAmelCase = XLNetTokenizer(A , keep_accents=A) _UpperCAmelCase = tokenizer.tokenize('This is a test') self.assertListEqual(A , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual(tokenizer.convert_tokens_to_ids(A) , [2_85, 46, 10, 1_70, 3_82]) _UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( A , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', 
SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) _UpperCAmelCase = tokenizer.convert_tokens_to_ids(A) self.assertListEqual(A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4]) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(A) self.assertListEqual( A , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def _lowerCamelCase ( self : Tuple) -> str: """simple docstring""" _UpperCAmelCase = XLNetTokenizer(A , do_lower_case=A) _UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( A , [ SPIECE_UNDERLINE + '', 'i', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['▁he', 'll', 'o']) def _lowerCamelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = XLNetTokenizer(A , do_lower_case=A) _UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( A , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] , ) @slow def _lowerCamelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" _UpperCAmelCase = 
XLNetTokenizer.from_pretrained('xlnet-base-cased') _UpperCAmelCase = tokenizer.encode('sequence builders' , add_special_tokens=A) _UpperCAmelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=A) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A , A) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def _lowerCamelCase ( self : int) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = {'input_ids': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 
4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
290
import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __lowerCAmelCase ( A ): def _lowerCamelCase ( self : List[str]) -> int: """simple docstring""" _UpperCAmelCase = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(A , 'hidden_sizes')) self.parent.assertTrue(hasattr(A , 'neck_hidden_sizes')) self.parent.assertTrue(hasattr(A , 'num_attention_heads')) class __lowerCAmelCase : def __init__( self : int , A : Tuple , A : List[str]=13 , A : List[str]=32 , A : List[str]=2 , A : List[str]=3 , A : List[Any]=6_40 , A : Any=4 , A : int="silu" , A : int=3 , A : Dict=32 , A : List[Any]=0.1 , A : Optional[Any]=0.1 , A : Optional[int]=0.1 , A : List[str]=0.0_2 , A : int=True , A : Any=True , A : List[str]=10 , A : Tuple=None , ) -> Dict: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = last_hidden_size _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = conv_kernel_size _UpperCAmelCase = output_stride _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = classifier_dropout_prob _UpperCAmelCase = use_labels _UpperCAmelCase = 
is_training _UpperCAmelCase = num_labels _UpperCAmelCase = initializer_range _UpperCAmelCase = scope def _lowerCamelCase ( self : Union[str, Any]) -> Any: """simple docstring""" _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels) _UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels) _UpperCAmelCase = self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCamelCase ( self : str) -> int: """simple docstring""" return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def _lowerCamelCase ( self : List[Any] , A : Dict , A : Tuple , A : int , A : Tuple) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = MobileViTModel(config=A) model.to(A) model.eval() _UpperCAmelCase = model(A) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCamelCase ( self : int , A : Any , A : List[Any] , A : List[Any] , A : Optional[int]) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = MobileViTForImageClassification(A) model.to(A) model.eval() _UpperCAmelCase = model(A , labels=A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _lowerCamelCase ( self : int , A : Tuple , A : Optional[Any] , A : 
Union[str, Any] , A : List[Any]) -> int: """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = MobileViTForSemanticSegmentation(A) model.to(A) model.eval() _UpperCAmelCase = model(A) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _UpperCAmelCase = model(A , labels=A) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCamelCase ( self : int) -> Any: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( A , A , unittest.TestCase ): UpperCamelCase = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) UpperCamelCase = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def _lowerCamelCase ( self : str) -> Tuple: """simple docstring""" _UpperCAmelCase = MobileViTModelTester(self) _UpperCAmelCase = MobileViTConfigTester(self , config_class=A , has_text_modality=A) def _lowerCamelCase ( self : Optional[int]) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds') def _lowerCamelCase ( self : Tuple) -> Dict: """simple docstring""" pass @unittest.skip(reason='MobileViT does not support input and output embeddings') def _lowerCamelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" pass 
@unittest.skip(reason='MobileViT does not output attentions') def _lowerCamelCase ( self : Any) -> Optional[Any]: """simple docstring""" pass def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(A) _UpperCAmelCase = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , A) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def _lowerCamelCase ( self : Union[str, Any]) -> List[str]: """simple docstring""" pass def _lowerCamelCase ( self : Tuple) -> str: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A) def _lowerCamelCase ( self : Optional[int]) -> Any: """simple docstring""" def check_hidden_states_output(A : List[str] , A : Union[str, Any] , A : int): _UpperCAmelCase = model_class(A) model.to(A) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(A , A)) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = 5 self.assertEqual(len(A) , A) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
_UpperCAmelCase = 2 for i in range(len(A)): self.assertListEqual( list(hidden_states[i].shape[-2:]) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(A , A , A) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(A , A , A) def _lowerCamelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A) def _lowerCamelCase ( self : int) -> Tuple: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A) @slow def _lowerCamelCase ( self : Optional[int]) -> Union[str, Any]: """simple docstring""" for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = MobileViTModel.from_pretrained(A) self.assertIsNotNone(A) def A ( ) -> List[str]: '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self : Tuple) -> Dict: """simple docstring""" return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small') if is_vision_available() else None @slow def _lowerCamelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small').to(A) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=A , 
return_tensors='pt').to(A) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**A) # verify the logits _UpperCAmelCase = torch.Size((1, 10_00)) self.assertEqual(outputs.logits.shape , A) _UpperCAmelCase = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3]).to(A) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4)) @slow def _lowerCamelCase ( self : List[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small') _UpperCAmelCase = model.to(A) _UpperCAmelCase = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small') _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=A , return_tensors='pt').to(A) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**A) _UpperCAmelCase = outputs.logits # verify the logits _UpperCAmelCase = torch.Size((1, 21, 32, 32)) self.assertEqual(logits.shape , A) _UpperCAmelCase = torch.tensor( [ [[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]], [[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]], [[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]], ] , device=A , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4)) @slow def _lowerCamelCase ( self : List[Any]) -> List[str]: """simple docstring""" _UpperCAmelCase = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small') _UpperCAmelCase = model.to(A) _UpperCAmelCase = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small') _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=A , return_tensors='pt').to(A) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**A) _UpperCAmelCase = outputs.logits.detach().cpu() _UpperCAmelCase = 
image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)]) _UpperCAmelCase = torch.Size((50, 60)) self.assertEqual(segmentation[0].shape , A) _UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=A) _UpperCAmelCase = torch.Size((32, 32)) self.assertEqual(segmentation[0].shape , A)
290
1
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class _lowerCAmelCase ( ctypes.Structure ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def _A ( ) -> int: '''simple docstring''' if os.name == "nt": __lowercase = CursorInfo() __lowercase = ctypes.windll.kernelaa.GetStdHandle(-11) ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCamelCase_, ctypes.byref(UpperCamelCase_)) __lowercase = False ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCamelCase_, ctypes.byref(UpperCamelCase_)) elif os.name == "posix": sys.stdout.write("\033[?25l") sys.stdout.flush() def _A ( ) -> int: '''simple docstring''' if os.name == "nt": __lowercase = CursorInfo() __lowercase = ctypes.windll.kernelaa.GetStdHandle(-11) ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCamelCase_, ctypes.byref(UpperCamelCase_)) __lowercase = True ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCamelCase_, ctypes.byref(UpperCamelCase_)) elif os.name == "posix": sys.stdout.write("\033[?25h") sys.stdout.flush() @contextmanager def _A ( ) -> Optional[int]: '''simple docstring''' try: hide_cursor() yield finally: show_cursor()
17
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]: '''simple docstring''' if isinstance(UpperCamelCase_, torch.Tensor): return image elif isinstance(UpperCamelCase_, PIL.Image.Image): __lowercase = [image] if isinstance(image[0], PIL.Image.Image): __lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] __lowercase = np.concatenate(UpperCamelCase_, axis=0) __lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0 __lowercase = image.transpose(0, 3, 1, 2) __lowercase = 2.0 * image - 1.0 __lowercase = torch.from_numpy(UpperCamelCase_) elif isinstance(image[0], torch.Tensor): __lowercase = torch.cat(UpperCamelCase_, dim=0) return image def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int: '''simple docstring''' if not isinstance(UpperCamelCase_, np.ndarray): __lowercase = True __lowercase = va.device __lowercase = va.cpu().numpy() __lowercase = va.cpu().numpy() __lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_))) if np.abs(UpperCamelCase_) > DOT_THRESHOLD: __lowercase = (1 - t) * va + t * va else: __lowercase = np.arccos(UpperCamelCase_) __lowercase = np.sin(UpperCamelCase_) __lowercase = theta_a * t __lowercase = 
np.sin(UpperCamelCase_) __lowercase = np.sin(theta_a - theta_t) / sin_theta_a __lowercase = sin_theta_t / sin_theta_a __lowercase = sa * va + sa * va if inputs_are_torch: __lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_) return va def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int: '''simple docstring''' __lowercase = F.normalize(UpperCamelCase_, dim=-1) __lowercase = F.normalize(UpperCamelCase_, dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]: '''simple docstring''' for param in model.parameters(): __lowercase = value class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ): super().__init__() self.register_modules( vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, ) __lowercase = ( feature_extractor.size if isinstance(feature_extractor.size, UpperCAmelCase__ ) else feature_extractor.size["shortest_edge"] ) __lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std ) set_requires_grad(self.text_encoder, UpperCAmelCase__ ) set_requires_grad(self.clip_model, UpperCAmelCase__ ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # 
half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__ ) def _lowercase ( self : int ): self.enable_attention_slicing(UpperCAmelCase__ ) def _lowercase ( self : str ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ): # get the original timestep using init_timestep __lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ ) __lowercase = max(num_inference_steps - init_timestep, 0 ) __lowercase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ): if not isinstance(UpperCAmelCase__, torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" ) __lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ ) ] __lowercase = torch.cat(UpperCAmelCase__, dim=0 ) else: __lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 0.18_215 * init_latents __lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 ) __lowercase = 
randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) # get latents __lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = init_latents return latents def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ): __lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): __lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) ) __lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ): __lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ ) __lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ ) __lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 ) return image_embeddings_clip @torch.enable_grad() def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ): __lowercase = latents.detach().requires_grad_() __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): __lowercase = self.scheduler.alphas_cumprod[timestep] __lowercase = 1 - alpha_prod_t # compute predicted 
original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 __lowercase = torch.sqrt(UpperCAmelCase__ ) __lowercase = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = self.scheduler.sigmas[index] __lowercase = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * sample __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ ) __lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype ) __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ ) __lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale __lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0] if isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = latents.detach() + grads * (sigma**2) __lowercase = noise_pred_original else: __lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, 
UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(UpperCAmelCase__, torch.Generator ) and batch_size > 1: __lowercase = [generator] + [None] * (batch_size - 1) __lowercase = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] __lowercase = [x[0] for x in coca_is_none if x[1]] __lowercase = ", ".join(UpperCAmelCase__ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) if style_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) # get prompt text embeddings for content and style __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", 
max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # duplicate text embeddings for each generation per prompt __lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # set timesteps __lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) __lowercase = {} if accepts_offset: __lowercase = 1 self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) __lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device ) __lowercase = timesteps[:1].repeat(UpperCAmelCase__ ) # Preprocess image __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) if clip_guidance_scale > 0: __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = slerp( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
__lowercase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __lowercase = content_text_input.input_ids.shape[-1] __lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" ) __lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt __lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowercase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. __lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8) __lowercase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to( self.device ) else: __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __lowercase = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __lowercase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __lowercase = {} if accepts_eta: __lowercase = eta # check if the scheduler accepts generator __lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: __lowercase = generator with self.progress_bar(total=UpperCAmelCase__ ): for i, t in enumerate(UpperCAmelCase__ ): # expand the latents if we are doing classifier free guidance __lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample # perform classifier free guidance if do_classifier_free_guidance: __lowercase ,__lowercase = noise_pred.chunk(2 ) __lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: __lowercase = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) __lowercase ,__lowercase = self.cond_fn( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) # compute the previous noisy sample x_t -> x_t-1 __lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * latents __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": __lowercase = self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image, None) return 
StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
17
1
"""Fast (tokenizers-backed) tokenizer for RetriBERT — a BERT-style WordPiece tokenizer."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """BERT-style fast tokenizer for RetriBERT checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the backend normalizer if the saved tokenizer.json disagrees
        # with the arguments the user passed in.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model input as [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: 0 for the first sequence (incl. [CLS]/[SEP]), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
359
"""Miscellaneous model/environment utilities (accelerate)."""
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Return True if `module` is a torch.compile (dynamo) OptimizedModule."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Unwrap `model` from DDP/DataParallel/DeepSpeed/compile wrappers.

    When `keep_fp32_wrapper` is False, the mixed-precision forward wrapper
    installed by accelerate is also removed (restoring `_original_forward`).
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # peel decorator layers until we reach the pre-wrapping forward
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        # re-attach the unwrapped module to the compiled wrapper
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Block until all processes reach this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` once per node (via xm.save on TPU, torch.save elsewhere)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables for the `with` body."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Best-effort human-readable name for an object, class, or function."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` (in place) and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None):
    """Return True if a TCP connection to localhost:`port` succeeds (default 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
322
0
"""simple docstring""" class __lowerCAmelCase : # Public class to implement a graph '''simple docstring''' def __init__( self , _a , _a , _a ): __a = row __a = col __a = graph def __UpperCAmelCase ( self , _a , _a , _a ): return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def __UpperCAmelCase ( self , _a , _a , _a ): # Checking all 8 elements surrounding nth element __a = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __a = [-1, 0, 1, -1, 1, -1, 0, 1] __a = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _a ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , _a ) def __UpperCAmelCase ( self ): # And finally, count all islands. __a = [[False for j in range(self.COL )] for i in range(self.ROW )] __a = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(_a , _a , _a ) count += 1 return count
45
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. 
This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it 
doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , 
num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ 
= [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
347
0
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''nvidia/segformer-b0-finetuned-ade-512-512''': ( '''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json''' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''segformer''' def __init__( self :int , snake_case :str=3 , snake_case :int=4 , snake_case :Optional[Any]=[2, 2, 2, 2] , snake_case :Union[str, Any]=[8, 4, 2, 1] , snake_case :List[Any]=[32, 64, 160, 256] , snake_case :Union[str, Any]=[7, 3, 3, 3] , snake_case :int=[4, 2, 2, 2] , snake_case :Any=[1, 2, 5, 8] , snake_case :List[str]=[4, 4, 4, 4] , snake_case :Tuple="gelu" , snake_case :List[Any]=0.0 , snake_case :int=0.0 , snake_case :Optional[int]=0.1 , snake_case :List[Any]=0.02 , snake_case :Dict=0.1 , snake_case :Any=1e-6 , snake_case :Union[str, Any]=256 , snake_case :Union[str, Any]=255 , **snake_case :List[Any] , ): '''simple docstring''' super().__init__(**snake_case ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be" " removed, as the behaviour will default to that of reshape_last_stage = True." 
, snake_case , ) A_ : Any = num_channels A_ : Tuple = num_encoder_blocks A_ : int = depths A_ : int = sr_ratios A_ : Dict = hidden_sizes A_ : Tuple = patch_sizes A_ : Optional[int] = strides A_ : str = mlp_ratios A_ : Dict = num_attention_heads A_ : Optional[int] = hidden_act A_ : Optional[Any] = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : Tuple = classifier_dropout_prob A_ : Tuple = initializer_range A_ : List[Any] = drop_path_rate A_ : Optional[Any] = layer_norm_eps A_ : str = decoder_hidden_size A_ : Union[str, Any] = kwargs.get("reshape_last_stage" , snake_case ) A_ : Tuple = semantic_loss_ignore_index class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' return 1e-4 @property def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' return 12
70
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : Any = { '''configuration_clap''': [ '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ClapAudioConfig''', '''ClapConfig''', '''ClapTextConfig''', ], '''processing_clap''': ['''ClapProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Tuple = [ '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ClapModel''', '''ClapPreTrainedModel''', '''ClapTextModel''', '''ClapTextModelWithProjection''', '''ClapAudioModel''', '''ClapAudioModelWithProjection''', ] _lowerCAmelCase : int = ['''ClapFeatureExtractor'''] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys _lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
70
1
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata _a = '''''' if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"): class __A ( tr.AbstractTransform ): '''simple docstring''' def __init__( self , __lowerCAmelCase = " " ): '''simple docstring''' lowerCamelCase__ = sentence_delimiter def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' return list(__lowerCAmelCase ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = [] for sent_idx, sentence in enumerate(__lowerCAmelCase ): chars.extend(self.process_string(__lowerCAmelCase ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__lowerCAmelCase ) - 1: chars.append(self.sentence_delimiter ) return chars _a = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: _a = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) _a = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' _a = '''\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information. 
Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. ''' _a = ''' Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcribtions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result. Returns: (float): the character error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> cer = datasets.load_metric("cer") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def __lowerCamelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ] , ) def __lowerCamelCase ( self , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ): '''simple docstring''' if concatenate_texts: return jiwer.compute_measures( __lowerCAmelCase , __lowerCAmelCase , truth_transform=__lowerCAmelCase , hypothesis_transform=__lowerCAmelCase , )["wer"] lowerCamelCase__ = 0 lowerCamelCase__ = 0 for prediction, reference in zip(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = jiwer.compute_measures( __lowerCAmelCase , __lowerCAmelCase , truth_transform=__lowerCAmelCase , hypothesis_transform=__lowerCAmelCase , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
209
import argparse from collections import defaultdict import yaml A : str = '''docs/source/en/_toctree.yml''' def __lowerCamelCase ( __a :str ) -> List[Any]: """simple docstring""" A__ = defaultdict(__a ) A__ = [] A__ = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(__a ) A__ = new_doc_list A__ = [key for key, value in counts.items() if value > 1] A__ = [] for duplicate_key in duplicates: A__ = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(__a ) > 1: raise ValueError( F'{duplicate_key} is present several times in the documentation table of content at ' """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) A__ = sorted(__a , key=lambda __a : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__a ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(__a ) # Sort return overview_doc def __lowerCamelCase ( __a :Any=False ) -> List[str]: """simple docstring""" with open(__a , encoding="""utf-8""" ) as f: A__ = yaml.safe_load(f.read() ) # Get to the API doc A__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 A__ = content[api_idx]["""sections"""] # Then to the model doc A__ = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 A__ = api_doc[scheduler_idx]["""sections"""] A__ = clean_doc_toc(__a ) A__ = False if new_scheduler_doc != scheduler_doc: A__ = True if overwrite: A__ = new_scheduler_doc if diff: if overwrite: A__ = api_doc with open(__a , """w""" , 
encoding="""utf-8""" ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def __lowerCamelCase ( __a :Optional[int]=False ) -> Dict: """simple docstring""" with open(__a , encoding="""utf-8""" ) as f: A__ = yaml.safe_load(f.read() ) # Get to the API doc A__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 A__ = content[api_idx]["""sections"""] # Then to the model doc A__ = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 A__ = False A__ = api_doc[pipeline_idx]["""sections"""] A__ = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: A__ = pipeline_doc["""section"""] A__ = clean_doc_toc(__a ) if overwrite: A__ = new_sub_pipeline_doc new_pipeline_docs.append(__a ) # sort overall pipeline doc A__ = clean_doc_toc(__a ) if new_pipeline_docs != pipeline_docs: A__ = True if overwrite: A__ = new_pipeline_docs if diff: if overwrite: A__ = api_doc with open(__a , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": A : Tuple = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') A : Optional[Any] = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
274
0
"""simple docstring""" import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: # Initialise PyTorch model lowercase__: Optional[Any] = MobileBertConfig.from_json_file(__UpperCAmelCase ) print(F"""Building PyTorch model from configuration: {config}""" ) lowercase__: int = MobileBertForPreTraining(__UpperCAmelCase ) # Load weights from tf checkpoint lowercase__: Union[str, Any] = load_tf_weights_in_mobilebert(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , __UpperCAmelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--mobilebert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained MobileBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __A = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
2
"""simple docstring""" import unittest from transformers import DonutProcessor __A = "naver-clova-ix/donut-base" class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def _snake_case ( self ): lowercase__: int = DonutProcessor.from_pretrained(_UpperCAmelCase ) def _snake_case ( self ): lowercase__: Tuple = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__: Union[str, Any] = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__: str = self.processor.tokenajson(_UpperCAmelCase ) self.assertDictEqual(_UpperCAmelCase , _UpperCAmelCase )
2
1
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __lowerCAmelCase : '''simple docstring''' def __init__(self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any=3 , UpperCamelCase : str=32 , UpperCamelCase : Tuple=3 , UpperCamelCase : Union[str, Any]=10 , UpperCamelCase : int=[8, 16, 32, 64] , UpperCamelCase : Dict=[1, 1, 2, 1] , UpperCamelCase : int=True , UpperCamelCase : List[Any]=True , UpperCamelCase : Optional[int]="relu" , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Tuple=None , UpperCamelCase : Optional[int]=["stage2", "stage3", "stage4"] , UpperCamelCase : Optional[int]=[2, 3, 4] , UpperCamelCase : Tuple=1 , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = num_channels lowercase__ = embeddings_size lowercase__ = hidden_sizes lowercase__ = depths lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_act lowercase__ = num_labels lowercase__ = scope lowercase__ = len(UpperCamelCase ) lowercase__ = out_features lowercase__ = out_indices lowercase__ = num_groups def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, 
self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def UpperCamelCase__ (self : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : str ): '''simple docstring''' lowercase__ = BitModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Dict ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = BitForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ (self : Any , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : str ): '''simple docstring''' lowercase__ = BitBackbone(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) 
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase__ = None lowercase__ = BitBackbone(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs lowercase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : List[Any] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () lowerCAmelCase__ : Union[str, Any] = ( {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification} if is_torch_available() else {} ) lowerCAmelCase__ : Optional[Any] = False lowerCAmelCase__ : str = False lowerCAmelCase__ : Tuple = False lowerCAmelCase__ : int = False lowerCAmelCase__ : str = False def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = BitModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase ) def UpperCamelCase__ (self : str ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() 
self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ (self : Tuple ): '''simple docstring''' return @unittest.skip(reason='''Bit does not output attentions''' ) def UpperCamelCase__ (self : Any ): '''simple docstring''' pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def UpperCamelCase__ (self : Any ): '''simple docstring''' pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' pass def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ ,lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ ,lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(config=UpperCamelCase ) for name, module in model.named_modules(): if isinstance(UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"Parameter {name} of 
model {model_class} seems not properly initialized" , ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' def check_hidden_states_output(UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] ): lowercase__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): lowercase__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase__ ,lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__ = layer_type lowercase__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' pass def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def UpperCamelCase__ (self : Any ): '''simple docstring''' for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = BitModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def _SCREAMING_SNAKE_CASE () -> List[str]: """simple docstring""" 
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase__ (self : int ): '''simple docstring''' return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=UpperCamelCase , return_tensors='''pt''' ).to(UpperCamelCase ) # forward pass with torch.no_grad(): lowercase__ = model(**UpperCamelCase ) # verify the logits lowercase__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) lowercase__ = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4 ) ) @require_torch class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Dict = (BitBackbone,) if is_torch_available() else () lowerCAmelCase__ : Optional[int] = BitConfig lowerCAmelCase__ : List[str] = False def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = BitModelTester(self )
2
'''simple docstring''' from ....utils import logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=2048 ): '''simple docstring''' lowercase__ = config.__dict__ lowercase__ = modal_hidden_size if num_labels: lowercase__ = num_labels
2
1
def UpperCamelCase__( UpperCamelCase__ : int = 10**12 )->int: A__ = 1 A__ = 0 A__ = 1 A__ = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(F"{solution() = }")
39
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): __SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer'''] __SCREAMING_SNAKE_CASE = '''Pix2StructImageProcessor''' __SCREAMING_SNAKE_CASE = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self,__lowerCamelCase,__lowerCamelCase ): A__ = False super().__init__(__lowerCamelCase,__lowerCamelCase ) def __call__( self,__lowerCamelCase=None,__lowerCamelCase = None,__lowerCamelCase = True,__lowerCamelCase = False,__lowerCamelCase = None,__lowerCamelCase = None,__lowerCamelCase = 2048,__lowerCamelCase = 0,__lowerCamelCase = None,__lowerCamelCase = None,__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = True,__lowerCamelCase = None,**__lowerCamelCase,): if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None and not self.image_processor.is_vqa: A__ = self.tokenizer A__ = self.tokenizer( text=__lowerCamelCase,add_special_tokens=__lowerCamelCase,padding=__lowerCamelCase,truncation=__lowerCamelCase,max_length=__lowerCamelCase,stride=__lowerCamelCase,pad_to_multiple_of=__lowerCamelCase,return_attention_mask=__lowerCamelCase,return_overflowing_tokens=__lowerCamelCase,return_special_tokens_mask=__lowerCamelCase,return_offsets_mapping=__lowerCamelCase,return_token_type_ids=__lowerCamelCase,return_length=__lowerCamelCase,verbose=__lowerCamelCase,return_tensors=__lowerCamelCase,**__lowerCamelCase,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A__ = self.image_processor( __lowerCamelCase,return_tensors=__lowerCamelCase,max_patches=__lowerCamelCase,**__lowerCamelCase ) else: # add 
pixel_values and bbox A__ = self.image_processor( __lowerCamelCase,return_tensors=__lowerCamelCase,max_patches=__lowerCamelCase,header_text=__lowerCamelCase,**__lowerCamelCase ) if text is not None and not self.image_processor.is_vqa: A__ = self.tokenizer( text=__lowerCamelCase,add_special_tokens=__lowerCamelCase,padding=__lowerCamelCase,truncation=__lowerCamelCase,max_length=__lowerCamelCase,stride=__lowerCamelCase,pad_to_multiple_of=__lowerCamelCase,return_attention_mask=__lowerCamelCase,return_overflowing_tokens=__lowerCamelCase,return_special_tokens_mask=__lowerCamelCase,return_offsets_mapping=__lowerCamelCase,return_token_type_ids=__lowerCamelCase,return_length=__lowerCamelCase,verbose=__lowerCamelCase,return_tensors=__lowerCamelCase,**__lowerCamelCase,) if "attention_mask" in text_encoding: A__ = text_encoding.pop('''attention_mask''' ) if "input_ids" in text_encoding: A__ = text_encoding.pop('''input_ids''' ) else: A__ = None if text_encoding is not None: encoding_image_processor.update(__lowerCamelCase ) return encoding_image_processor def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ): return self.tokenizer.batch_decode(*__lowerCamelCase,**__lowerCamelCase ) def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ): return self.tokenizer.decode(*__lowerCamelCase,**__lowerCamelCase ) @property def UpperCamelCase ( self ): A__ = self.tokenizer.model_input_names A__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
39
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


# NOTE(review): identifiers appear machine-obfuscated — the module constants
# all rebind `snake_case_` (so VOCAB_FILES_NAMES / the logger / the URL maps
# overwrite one another), class attributes rebind `A_`, parameters are all
# `a__`, and method bodies reference names (`mask_token`, `vocab_file`,
# `token_ids_a`, `out_vocab_file`) never bound under these spellings.
# Code is left byte-for-byte; only comments/docstrings were added. The
# structure matches the fast CamemBERT tokenizer (RoBERTa-style specials on a
# SentencePiece model).
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    snake_case_ = None

snake_case_ = logging.get_logger(__name__)

# vocab file names, pretrained file map, max model input sizes, SPM prefix.
snake_case_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

snake_case_ = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}

snake_case_ = {
    'camembert-base': 512,
}

snake_case_ = '▁'


class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    A_ : str = VOCAB_FILES_NAMES
    A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
    A_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_ : Optional[int] = ['input_ids', 'attention_mask']
    A_ : Union[str, Any] = CamembertTokenizer

    # NOTE(review): the `additional_special_tokens` default below is a mutable
    # list shared across calls — harmless only while never mutated in place.
    def __init__(self : List[Any] , a__ : Tuple=None , a__ : Dict=None , a__ : List[str]="<s>" , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="</s>" , a__ : int="<s>" , a__ : Optional[Any]="<unk>" , a__ : Optional[int]="<pad>" , a__ : Optional[Any]="<mask>" , a__ : Optional[Any]=["<s>NOTUSED", "</s>NOTUSED"] , **a__ : Any , ):
        """Build the fast tokenizer; the mask token is made left-strip so
        '<mask>' absorbs the preceding space, matching the slow tokenizer."""
        __snake_case = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
        super().__init__(
            a__ , tokenizer_file=a__ , bos_token=a__ , eos_token=a__ , sep_token=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , additional_special_tokens=a__ , **a__ , )

        __snake_case = vocab_file
        # Slow-tokenizer files can only be re-saved when a vocab file exists.
        __snake_case = False if not self.vocab_file else True

    def a (self : Optional[int] , a__ : List[int] , a__ : Optional[List[int]] = None ):
        """Add RoBERTa-style special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __snake_case = [self.cls_token_id]
        __snake_case = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def a (self : Dict , a__ : List[int] , a__ : Optional[List[int]] = None ):
        """Return an all-zero token-type mask (CamemBERT has no segment ids)."""
        __snake_case = [self.sep_token_id]
        __snake_case = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def a (self : str , a__ : str , a__ : Optional[str] = None ):
        """Copy the SentencePiece model into ``save_directory``.

        Raises when this fast tokenizer was built without the slow vocab file;
        logs and returns (implicitly ``None``) when the target is not a
        directory.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )

        if not os.path.isdir(a__ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        __snake_case = os.path.join(
            a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        # Skip the copy when the destination already is the source file.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
            copyfile(self.vocab_file , a__ )

        return (out_vocab_file,)
24
import os import pytest from transformers.dynamic_module_utils import get_imports snake_case_ = '\nimport os\n' snake_case_ = '\ndef foo():\n import os\n return False\n' snake_case_ = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n' snake_case_ = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n' snake_case_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n' snake_case_ = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , snake_case_ ) def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Optional[int] ) -> Dict: __snake_case = os.path.join(snake_case_ , '''test_file.py''' ) with open(snake_case_ , '''w''' ) as _tmp_file: _tmp_file.write(snake_case_ ) __snake_case = get_imports(snake_case_ ) assert parsed_imports == ["os"]
24
1
"""simple docstring""" from cva import destroyAllWindows, imread, imshow, waitKey def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' # getting number of pixels in the image _a , _a : Tuple = img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): _a : int = [2_5_5, 2_5_5, 2_5_5] - img[i][j] return img if __name__ == "__main__": # read original image _snake_case = imread('image_data/lena.jpg', 1) # convert to its negative _snake_case = convert_to_negative(img) # show result image imshow('negative of original image', img) waitKey(0) destroyAllWindows()
324
"""simple docstring""" import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin _snake_case = get_tests_dir('fixtures/test_sentencepiece.model') _snake_case = get_tests_dir('fixtures/test_sentencepiece_bpe.model') _snake_case = 'pt' if is_torch_available() else 'tf' @require_sentencepiece @require_tokenizers class UpperCamelCase ( snake_case_ , unittest.TestCase ): UpperCamelCase : str = CamembertTokenizer UpperCamelCase : List[Any] = CamembertTokenizerFast UpperCamelCase : Optional[int] = True UpperCamelCase : Union[str, Any] = True def _lowercase ( self : List[Any] ) -> Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing _a : List[Any] = CamembertTokenizer(UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self : List[str] ) -> Tuple: _a : Optional[Any] = """<pad>""" _a : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: _a : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(UpperCAmelCase__ ) , 1004 ) def _lowercase ( self : List[str] ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def _lowercase ( self : Union[str, Any] ) -> str: _a : Tuple = CamembertTokenizer(UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) _a : List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) _a : Any = """I was born in 92000, and this is falsé.""" _a : Union[str, 
Any] = tokenizer.encode(UpperCAmelCase__ ) _a : Dict = rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Tuple = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) _a : List[Any] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) _a : List[str] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) _a : int = rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : Dict ) -> List[str]: if not self.test_rust_tokenizer: return _a : Optional[int] = self.get_tokenizer() _a : Tuple = self.get_rust_tokenizer() _a : List[Any] = """I was born in 92000, and this is falsé.""" _a : List[str] = tokenizer.tokenize(UpperCAmelCase__ ) _a : Union[str, Any] = rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) _a : int = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) _a : Optional[int] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) _a : int = self.get_rust_tokenizer() _a : Optional[Any] = tokenizer.encode(UpperCAmelCase__ ) _a : Dict = rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def _lowercase ( self : Tuple ) -> List[Any]: # fmt: off _a : Dict = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 
56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. _a : Union[str, Any] = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=UpperCAmelCase__ , )
324
1
import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets lowerCamelCase_ = '''\ @inproceedings{popovic-2015-chrf, title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation", month = sep, year = "2015", address = "Lisbon, Portugal", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W15-3049", doi = "10.18653/v1/W15-3049", pages = "392--395", } @inproceedings{popovic-2017-chrf, title = "chr{F}++: words helping character n-grams", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Second Conference on Machine Translation", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W17-4770", doi = "10.18653/v1/W17-4770", pages = "612--618", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' lowerCamelCase_ = '''\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. 
See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. ''' lowerCamelCase_ = ''' Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: \'score\' (float): The chrF (chrF++) score, \'char_order\' (int): The character n-gram order, \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, \'beta\' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... 
lowercase=True) >>> print(results) {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} '''


# NOTE(review): identifiers appear machine-obfuscated — both methods are named
# `UpperCAmelCase_` (the second shadows the first), parameters all share the
# name `SCREAMING_SNAKE_CASE_`, and bodies reference names (`references`,
# `sb_chrf`, `output`) never bound under these spellings. Code is left
# byte-for-byte; only comments/docstrings were added. The structure matches
# the `datasets` chrF/chrF++ metric backed by sacrebleu.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A( datasets.Metric ):
    """chrF(++) metric: character (and optional word) n-gram F-scores via sacrebleu."""

    def UpperCAmelCase_ (self ):
        """Return metric metadata; chrF requires sacrebleu >= 1.4.12."""
        if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
            raise ImportWarning(
                """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
                """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
                } ) ,
            codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] ,
            reference_urls=[
                """https://github.com/m-popovic/chrF""",
            ] , )

    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = CHRF.CHAR_ORDER , SCREAMING_SNAKE_CASE_ = CHRF.WORD_ORDER , SCREAMING_SNAKE_CASE_ = CHRF.BETA , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , ):
        """Compute chrF(++) for predictions against same-length reference lists.

        References arrive as one sub-list per prediction; sacrebleu wants one
        list per reference *index*, so they are transposed before scoring.
        """
        UpperCamelCase__ = len(references[0] )
        if any(len(SCREAMING_SNAKE_CASE_ ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        # Transpose: references[pred][ref] -> transformed[ref][pred].
        UpperCamelCase__ = [[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE_ )]
        UpperCamelCase__ = CHRF(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = sb_chrf.corpus_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
244
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

lowerCamelCase_ = logging.get_logger(__name__)


# NOTE(review): identifiers appear machine-obfuscated — every method is named
# `UpperCAmelCase_`, parameters all share `SCREAMING_SNAKE_CASE_`, assignment
# targets are all `UpperCamelCase__`, and bodies reference names (`size`,
# `crop_pct`, `images`, ...) never bound under these spellings. Code is left
# byte-for-byte; only comments/docstrings were added. Structure matches an
# image processor with resize (crop-percentage aware), center-crop, rescale
# and normalize steps.
class __A( __lowerCamelCase ):
    """Image processor producing `pixel_values` via resize/crop/rescale/normalize."""

    SCREAMING_SNAKE_CASE__ = ["""pixel_values"""]

    def __init__(self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
        """Store defaults: shortest-edge-224 resize, crop_pct 0.9, 224x224 crop,
        1/255 rescale, ImageNet mean/std normalization."""
        super().__init__(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = size if size is not None else {"""shortest_edge""": 2_24}
        UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
        UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name="""crop_size""" )

        UpperCamelCase__ = do_resize
        UpperCamelCase__ = size
        UpperCamelCase__ = crop_pct
        UpperCamelCase__ = resample
        UpperCamelCase__ = do_center_crop
        UpperCamelCase__ = crop_size
        UpperCamelCase__ = do_rescale
        UpperCamelCase__ = rescale_factor
        UpperCamelCase__ = do_normalize
        UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        UpperCamelCase__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
        """Resize an image; with `crop_pct` the target is first inflated by
        1/crop_pct so a later center-crop lands on the requested size."""
        UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )

        if crop_pct is not None:
            if "shortest_edge" in size:
                UpperCamelCase__ = int(size["""shortest_edge"""] / crop_pct )
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    UpperCamelCase__ = int(size["""height"""] / crop_pct )
                else:
                    UpperCamelCase__ = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
            else:
                raise ValueError("""Invalid size for resize: {}""".format(SCREAMING_SNAKE_CASE_ ) )

            UpperCamelCase__ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
        else:
            if "shortest_edge" in size:
                UpperCamelCase__ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size["""shortest_edge"""] , default_to_square=SCREAMING_SNAKE_CASE_ )
            elif "height" in size and "width" in size:
                UpperCamelCase__ = (size["""height"""], size["""width"""])
            else:
                raise ValueError("""Invalid size for resize: {}""".format(SCREAMING_SNAKE_CASE_ ) )

        return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
        """Center-crop to exactly size['height'] x size['width']."""
        UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(SCREAMING_SNAKE_CASE_ , size=(size["""height"""], size["""width"""]) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
        """Multiply pixel values by a scale factor (typically 1/255)."""
        return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
        """Normalize per channel with the given mean and std."""
        return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
        """Full pipeline: resize -> center-crop -> rescale -> normalize, then
        pack into a BatchFeature with channel-first layout by default."""
        UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
        UpperCamelCase__ = crop_pct if crop_pct is not None else self.crop_pct
        UpperCamelCase__ = resample if resample is not None else self.resample
        UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
        UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
        UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
        UpperCamelCase__ = image_std if image_std is not None else self.image_std

        UpperCamelCase__ = size if size is not None else self.size
        UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
        UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name="""crop_size""" )

        UpperCamelCase__ = make_list_of_images(SCREAMING_SNAKE_CASE_ )

        if not valid_images(SCREAMING_SNAKE_CASE_ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        # NOTE(review): due to operator precedence this parses as
        # `(do_resize and size is None) or resample is None`; a guard of the
        # form `do_resize and (size is None or resample is None)` looks
        # intended — confirm before changing.
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )

        if do_center_crop and crop_pct is None:
            raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )

        # All transformations expect numpy arrays.
        UpperCamelCase__ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]

        if do_resize:
            UpperCamelCase__ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]

        if do_center_crop:
            UpperCamelCase__ = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]

        if do_rescale:
            UpperCamelCase__ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]

        if do_normalize:
            UpperCamelCase__ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]

        UpperCamelCase__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]

        UpperCamelCase__ = {"""pixel_values""": images}
        return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
244
1
from math import isqrt def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> list[int]: UpperCAmelCase_ = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __UpperCamelCase , __UpperCamelCase ): UpperCAmelCase_ = False return [i for i in range(2 , __UpperCamelCase ) if is_prime[i]] def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**8 ) -> int: UpperCAmelCase_ = calculate_prime_numbers(max_number // 2 ) UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 UpperCAmelCase_ = len(__UpperCamelCase ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(F"{solution() = }")
177
"""Lazy-import bootstrap for the RoBERTa model family.

Only the import structure is built eagerly; the heavy backend submodules
(PyTorch / TensorFlow / Flax) are loaded on first attribute access via
``_LazyModule``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps each submodule to the public names it exports; consumed by _LazyModule
# so an attribute lookup triggers the import of exactly one submodule.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

# Each optional backend is registered only when its dependency is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror of _import_structure so static type checkers see the real symbols.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy; nothing heavy is imported yet.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
177
1
"""Convert an original SpeechT5 HiFi-GAN vocoder checkpoint to the Hugging Face format."""
import argparse

import numpy as np
import torch

from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    """Copy generator weights from the original state dict into ``hf_model``.

    Weight norm is applied first so the ``weight_g`` / ``weight_v`` parameters
    exist as copy targets, then removed again so the saved model holds plain
    fused weights.

    Args:
        checkpoint: original generator state dict (keys like ``input_conv.*``).
        hf_model: ``SpeechTaHifiGan`` instance to populate in place.
        config: ``SpeechTaHifiGanConfig`` describing upsampler/resblock layout.
    """
    # NOTE(review): attribute names (conv_pre, upsampler, resblocks, conv_post)
    # follow the SpeechTaHifiGan module layout — confirm against modeling code.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    """Build a ``SpeechTaHifiGan`` from an original checkpoint and save it.

    Args:
        checkpoint_path: path to the original ``torch.save``-d checkpoint.
        stats_path: path to ``stats.npy`` holding [mean, scale] normalization rows.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_path: optional HF config.json; defaults to a fresh config.
        repo_id: optional hub repo to push the converted model to.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats.npy stores the spectrogram normalization statistics: row 0 is the
    # mean, row 1 the scale — attached to the model so inference can denormalize.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()

    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
23
"""Project Euler problem 205: dice game win probability."""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Histogram of totals when rolling ``dice_number`` dice with ``sides_number`` faces.

    Args:
        sides_number: faces per die (values 1..sides_number).
        dice_number: how many dice are rolled.

    Returns:
        List of length ``sides_number * dice_number + 1`` where index ``t``
        counts the ordered rolls summing to ``t``.
    """
    max_total = sides_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    # Enumerate every ordered roll; frequencies then encode the distribution.
    for roll in product(range(1, sides_number + 1), repeat=dice_number):
        totals_frequencies[sum(roll)] += 1
    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice).

    Returns:
        The win probability rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9          # nine dice, minimum face 1
    max_peter_total = 4 * 9      # nine dice, maximum face 4
    min_colin_total = 6          # six dice, minimum face 1
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins when Colin's total is strictly smaller than his.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    return round(peter_wins_count / total_games_number, ndigits=7)


if __name__ == "__main__":
    print(f"{solution() = }")
239
0
"""Lazy-import bootstrap for MaskFormer.

Registers the public API in ``_import_structure``; the torch/vision-backed
submodules are only imported on first attribute access via ``_LazyModule``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Maps each submodule to the public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

# Image-processing classes need the vision extras (PIL etc.).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

# Modeling classes require PyTorch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror of _import_structure so static type checkers see the real symbols.
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy; nothing heavy is imported yet.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
353
"""Deprecated SageMaker-specific TrainingArguments with model-parallel support."""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging

logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    """Return True when SageMaker model parallelism is configured and installed.

    Checks the SageMaker-provided environment variables for an mp config with
    "partitions" and an mpi config with "sagemaker_mpi_enabled", then verifies
    the ``smdistributed`` package is importable.
    """
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False

    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    # Opaque string forwarded by the SageMaker launcher; ignored here.
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        """Run the base validation, then emit the deprecation warning."""
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        """Pick the torch device (and init the process group) for this worker."""
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            # Model parallel: smp assigns each process its local GPU.
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            # Data parallel via SageMaker's smddp backend.
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        """Total number of data-parallel processes."""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        # With model parallelism, smp handles device placement itself.
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
183
0